text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""napalm.junos package."""
# Import local modules
from napalm.junos.junos import JunOSDriver # noqa
|
{
"content_hash": "92e6fe5e9e8c0e31b93020f1864acacc",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 50,
"avg_line_length": 25.75,
"alnum_prop": 0.7475728155339806,
"repo_name": "napalm-automation/napalm",
"id": "54364ef717d11356623f756555e6a20e2fb76ea2",
"size": "728",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "napalm/junos/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "324"
},
{
"name": "Jinja",
"bytes": "17789"
},
{
"name": "Makefile",
"bytes": "117"
},
{
"name": "Python",
"bytes": "1142188"
},
{
"name": "Roff",
"bytes": "931"
},
{
"name": "Smarty",
"bytes": "14010"
}
],
"symlink_target": ""
}
|
"""
Support for an exposed aREST RESTful API of a device.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.arest/
"""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE, CONF_RESOURCE,
CONF_MONITORED_VARIABLES, CONF_NAME, STATE_UNKNOWN)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
CONF_FUNCTIONS = 'functions'
CONF_PINS = 'pins'
DEFAULT_NAME = 'aREST sensor'
PIN_VARIABLE_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PINS, default={}):
vol.Schema({cv.string: PIN_VARIABLE_SCHEMA}),
vol.Optional(CONF_MONITORED_VARIABLES, default={}):
vol.Schema({cv.string: PIN_VARIABLE_SCHEMA}),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the aREST sensor platform.

    Probes the device once to validate the resource URL and to learn which
    variables it actually exposes, then creates one ArestSensor per
    configured variable and per configured pin.  Returns False when the
    device cannot be reached at setup time.
    """
    resource = config.get(CONF_RESOURCE)
    var_conf = config.get(CONF_MONITORED_VARIABLES)
    pins = config.get(CONF_PINS)
    try:
        # Initial probe; the JSON is also used below to validate variables.
        response = requests.get(resource, timeout=10).json()
    except requests.exceptions.MissingSchema:
        _LOGGER.error("Missing resource or schema in configuration. "
                      "Add http:// to your URL")
        return False
    except requests.exceptions.ConnectionError:
        _LOGGER.error("No route to device at %s", resource)
        return False
    # Single shared poller for all variable-based sensors; pin sensors each
    # get their own ArestData below.
    arest = ArestData(resource)
    def make_renderer(value_template):
        """Create a renderer based on variable_template value."""
        if value_template is None:
            # No template configured: pass raw values through unchanged.
            return lambda value: value
        value_template.hass = hass
        def _render(value):
            try:
                return value_template.async_render({'value': value})
            except TemplateError:
                # Fall back to the raw value when the template fails.
                _LOGGER.exception("Error parsing value")
                return value
        return _render
    dev = []
    if var_conf is not None:
        for variable, var_data in var_conf.items():
            # Skip variables the device does not actually expose.
            if variable not in response['variables']:
                _LOGGER.error("Variable: %s does not exist", variable)
                continue
            renderer = make_renderer(var_data.get(CONF_VALUE_TEMPLATE))
            dev.append(ArestSensor(
                arest, resource, config.get(CONF_NAME, response[CONF_NAME]),
                var_data.get(CONF_NAME, variable), variable=variable,
                unit_of_measurement=var_data.get(CONF_UNIT_OF_MEASUREMENT),
                renderer=renderer))
    if pins is not None:
        for pinnum, pin in pins.items():
            renderer = make_renderer(pin.get(CONF_VALUE_TEMPLATE))
            # Each pin sensor polls independently via a dedicated ArestData.
            dev.append(ArestSensor(
                ArestData(resource, pinnum), resource,
                config.get(CONF_NAME, response[CONF_NAME]), pin.get(CONF_NAME),
                pin=pinnum, unit_of_measurement=pin.get(
                    CONF_UNIT_OF_MEASUREMENT), renderer=renderer))
    add_devices(dev)
class ArestSensor(Entity):
    """Implementation of an aREST sensor for exposed variables."""

    def __init__(self, arest, resource, location, name, variable=None,
                 pin=None, unit_of_measurement=None, renderer=None):
        """Initialize the sensor.

        arest: ArestData poller (shared for variables, dedicated for pins).
        resource: base URL of the aREST device.
        location, name: combined into the entity display name.
        variable: aREST variable name (variable-based sensors only).
        pin: pin identifier (pin-based sensors only).
        renderer: callable applied to raw values (template or passthrough).
        """
        self.arest = arest
        self._resource = resource
        self._name = '{} {}'.format(location.title(), name.title())
        self._variable = variable
        self._pin = pin
        self._state = STATE_UNKNOWN
        self._unit_of_measurement = unit_of_measurement
        self._renderer = renderer
        self.update()
        if self._pin is not None:
            # Switch the pin to input mode ('i') so it can be read.
            request = requests.get(
                '{}/mode/{}/i'.format(self._resource, self._pin), timeout=10)
            # BUG FIX: was `is not 200` — an identity test that only worked
            # by accident of CPython small-int caching; compare by value.
            if request.status_code != 200:
                _LOGGER.error("Can't set mode of %s", self._resource)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Return the state of the sensor."""
        values = self.arest.data
        if 'error' in values:
            return values['error']
        # Pin sensors store the reading under 'value'; variable sensors
        # store it under the variable name.
        value = self._renderer(
            values.get('value', values.get(self._variable, STATE_UNKNOWN)))
        return value

    def update(self):
        """Get the latest data from aREST API."""
        self.arest.update()

    @property
    def available(self):
        """Could the device be accessed during the last update call."""
        return self.arest.available
class ArestData(object):
    """The Class for handling the data retrieval for variables."""

    def __init__(self, resource, pin=None):
        """Initialize the data object.

        resource: base URL of the aREST device.
        pin: pin identifier, or None to poll the device's variables dict.
        """
        self._resource = resource
        self._pin = pin
        self.data = {}
        self.available = True

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from aREST device."""
        try:
            if self._pin is None:
                # Variable mode: fetch the whole exposed-variables dict.
                response = requests.get(self._resource, timeout=10)
                self.data = response.json()['variables']
            else:
                try:
                    if str(self._pin[0]) == 'A':
                        # Analog pin, e.g. 'A1' -> /analog/1
                        response = requests.get('{}/analog/{}'.format(
                            self._resource, self._pin[1:]), timeout=10)
                        self.data = {'value': response.json()['return_value']}
                    else:
                        # BUG FIX: digital pins configured as strings (e.g.
                        # "13") previously matched neither branch — no
                        # TypeError is raised when indexing a string — so
                        # self.data was never fetched.  Treat any non-'A'
                        # pin as digital.
                        response = requests.get('{}/digital/{}'.format(
                            self._resource, self._pin), timeout=10)
                        self.data = {'value': response.json()['return_value']}
                except TypeError:
                    # Unsubscriptable pin (e.g. int): digital as well.
                    response = requests.get('{}/digital/{}'.format(
                        self._resource, self._pin), timeout=10)
                    self.data = {'value': response.json()['return_value']}
            self.available = True
        except requests.exceptions.ConnectionError:
            _LOGGER.error("No route to device %s", self._resource)
            self.available = False
|
{
"content_hash": "f5047192c2ef30c2552f2d8711215fd6",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 79,
"avg_line_length": 34.34375,
"alnum_prop": 0.6079769487412799,
"repo_name": "JshWright/home-assistant",
"id": "6edef785280e807b5870828e19a1ad2d0a869df2",
"size": "6594",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/arest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1808411"
},
{
"name": "Python",
"bytes": "6070409"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15525"
}
],
"symlink_target": ""
}
|
import os
try:
    # Prefer zlib: its crc32 is always available and this aliasing lets
    # gen_crc32() below call `binascii.crc32` regardless of which module
    # actually provided it.
    import zlib
    binascii = zlib
except ImportError:
    # No zlib on this interpreter; fall back to binascii.crc32.
    zlib = None
    import binascii
from base64 import urlsafe_b64encode
from auth import up as auth_up
import conf
# Upload tuning defaults.
_workers = 1
_task_queue_size = _workers * 4
_try_times = 3
# Blocks are fixed at 4 MB (1 << 22 bytes), as required by the upload API.
_block_bits = 22
_block_size = 1 << _block_bits
_block_mask = _block_size - 1
_chunk_size = _block_size  # simplified mode; chunked upload is deprecated
class ResumableIoError(object):
    """Error descriptor returned (not raised) by the resumable-upload helpers.

    Instances are created once at module level and compared by identity.
    """

    value = None  # human-readable description of the failure

    def __init__(self, value):
        self.value = value
        return

    def __str__(self):
        return self.value
# Module-level sentinel error objects; callers compare against these by
# identity to distinguish failure modes.
err_invalid_put_progress = ResumableIoError("invalid put progress")
err_put_failed = ResumableIoError("resumable put failed")
err_unmatched_checksum = ResumableIoError("unmatched checksum")
err_putExtra_type = ResumableIoError("extra must the instance of PutExtra")
def setup(chunk_size=0, try_times=0):
    """Override the module-wide chunk size and retry count.

    Non-positive chunk_size restores the 4 MB default; try_times == 0
    restores the default of 3 attempts.
    """
    global _chunk_size, _try_times
    if chunk_size <= 0:
        chunk_size = 1 << 22
    if try_times == 0:
        try_times = 3
    _chunk_size = chunk_size
    _try_times = try_times
    return
def gen_crc32(data):
    """Return the CRC32 of *data* as an unsigned 32-bit integer."""
    # Masking normalizes the signed result Python 2 may return.
    return 0xffffffff & binascii.crc32(data)
class PutExtra(object):
    """Optional parameters and progress state for a resumable upload."""
    params = None # optional custom user variables; keys must start with "x:"
    mimetype = None # optional; client-chosen MimeType when the uptoken did not set DetectMime
    chunk_size = None # optional; per-upload chunk size (simplified mode; deprecated)
    try_times = None # optional; per-block retry count
    progresses = None # optional; per-block upload progress entries
    notify = lambda self, idx, size, ret: None # optional; per-block progress callback
    notify_err = lambda self, idx, size, err: None # optional; per-block error callback
    def __init__(self, bucket=None):
        self.bucket = bucket
        return
def put_file(uptoken, key, localfile, extra):
    """Upload a local file by path.

    Opens *localfile* in binary mode and delegates to put() with the file's
    size.  Returns the (ret, err) pair from put().
    """
    statinfo = os.stat(localfile)
    # BUG FIX: the original left the handle open if put() raised; `with`
    # guarantees it is closed on every path.
    with open(localfile, "rb") as f:
        ret, err = put(uptoken, key, f, statinfo.st_size, extra)
    return ret, err
def put(uptoken, key, f, fsize, extra):
    """Upload a binary stream by slicing it into blocks.

    Tries the primary UP_HOST first and falls back to UP_HOST2 unless the
    first attempt either succeeded or failed with a non-retryable status
    (4xx, 579, 6xx, 7xx).  Returns (ret, err); returns None when *extra*
    is not a PutExtra.
    """
    if not isinstance(extra, PutExtra):
        print("extra must the instance of PutExtra")
        return
    host = conf.UP_HOST
    try:
        ret, err, code = put_with_host(uptoken, key, f, fsize, extra, host)
        # `//` keeps integer semantics on both Python 2 and Python 3.
        if err is None or code // 100 == 4 or code == 579 or code // 100 == 6 or code // 100 == 7:
            return ret, err
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.  Any other failure falls through to the
        # backup host below (preserving the original best-effort intent).
        pass
    ret, err, code = put_with_host(uptoken, key, f, fsize, extra, conf.UP_HOST2)
    return ret, err
def put_with_host(uptoken, key, f, fsize, extra, host):
    # Split the stream into fixed-size blocks, upload each with retries,
    # then call mkfile to stitch the blocks into the final object.
    # Returns (ret, err, code).
    block_cnt = block_count(fsize)
    if extra.progresses is None:
        extra.progresses = [None] * block_cnt
    else:
        # A resumed upload must carry exactly one progress slot per block.
        if not len(extra.progresses) == block_cnt:
            return None, err_invalid_put_progress, 0
    if extra.try_times is None:
        extra.try_times = _try_times
    if extra.chunk_size is None:
        extra.chunk_size = _chunk_size
    for i in xrange(block_cnt):
        try_time = extra.try_times
        read_length = _block_size
        if (i + 1) * _block_size > fsize:
            # Final block may be shorter than the fixed block size.
            read_length = fsize - i * _block_size
        data_slice = f.read(read_length)
        while True:
            err = resumable_block_put(data_slice, i, extra, uptoken, host)
            if err is None:
                break
            try_time -= 1
            if try_time <= 0:
                return None, err_put_failed, 0
            print err, ".. retry"
    # mkfile goes to the host that accepted the last block, when one exists.
    mkfile_host = extra.progresses[-1]["host"] if block_cnt else host
    mkfile_client = auth_up.Client(uptoken, mkfile_host)
    return mkfile(mkfile_client, key, fsize, extra, host)
def resumable_block_put(block, index, extra, uptoken, host):
    # Upload one block unless its progress entry shows it is already done
    # ("ctx" present).  Returns None on success or an error object; the
    # caller retries based on that.
    block_size = len(block)
    mkblk_client = auth_up.Client(uptoken, host)
    if extra.progresses[index] is None or "ctx" not in extra.progresses[index]:
        crc32 = gen_crc32(block)
        block = bytearray(block)
        extra.progresses[index], err, code = mkblock(mkblk_client, block_size, block, host)
        if err is not None:
            extra.notify_err(index, block_size, err)
            return err
        # Verify the server-reported checksum against the local one.
        if not extra.progresses[index]["crc32"] == crc32:
            return err_unmatched_checksum
    extra.notify(index, block_size, extra.progresses[index])
    return
def block_count(size):
    """Return the number of fixed-size blocks needed to hold *size* bytes."""
    # BUG FIX (py2/py3): `/` would float-divide on Python 3; `//` is
    # identical on Python 2 ints and correct on Python 3.  The original
    # `global _block_size` was a no-op for a pure read and is dropped.
    return (size + _block_mask) // _block_size
def mkblock(client, block_size, first_chunk, host):
    """Create a new block on *host* by POSTing its first chunk.

    Returns whatever client.call_with returns, i.e. (ret, err, code).
    """
    return client.call_with(
        "http://%s/mkblk/%s" % (host, block_size),
        first_chunk,
        "application/octet-stream",
        len(first_chunk))
def putblock(client, block_ret, chunk):
    """Append *chunk* to an existing block at its current offset.

    block_ret is the dict returned by mkblock (host, ctx, offset).
    """
    target = "%s/bput/%s/%s" % (block_ret["host"],
                                block_ret["ctx"], block_ret["offset"])
    return client.call_with(target, chunk, "application/octet-stream", len(chunk))
def mkfile(client, key, fsize, extra, host):
    """Stitch previously uploaded blocks into the final object.

    Builds the /mkfile URL — optionally carrying mimeType, key and custom
    "x:" params, each urlsafe-base64 encoded — and posts the comma-joined
    list of block ctx tokens.  Returns whatever client.call_with returns.
    """
    url = ["http://%s/mkfile/%s" % (host, fsize)]
    if extra.mimetype:
        url.append("mimeType/%s" % urlsafe_b64encode(extra.mimetype))
    if key is not None:
        url.append("key/%s" % urlsafe_b64encode(key))
    if extra.params:
        # BUG FIX (py2/py3): dict.iteritems() does not exist on Python 3;
        # items() behaves identically here on both versions.
        for k, v in extra.params.items():
            url.append("%s/%s" % (k, urlsafe_b64encode(v)))
    url = "/".join(url)
    body = ",".join([i["ctx"] for i in extra.progresses])
    return client.call_with(url, body, "text/plain", len(body))
|
{
"content_hash": "1a469c421701a4447fa333dc32ecc462",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 95,
"avg_line_length": 29.857954545454547,
"alnum_prop": 0.6137012369172217,
"repo_name": "jemygraw/qrsync-python",
"id": "cc544b86ea3d219c80752fffb20ccfea6a3c33fd",
"size": "5460",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qiniu/resumable_io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60187"
}
],
"symlink_target": ""
}
|
def index():
    """Controller for the CV landing page: gathers every CV section."""
    response.flash = T('Welcome to my resume/cv.')
    response.title = T('KPW CV')
    # Projects and posts are intentionally disabled for now.
    #projects = db(db.project).select()
    projects = ''
    posts = ''
    courses = db(db.course).select(orderby=db.course.title)
    # Professional experience split into jobs vs internships, newest first.
    jobs = db(db.professional_experience.intern == False).select(
        orderby=~db.professional_experience.datefrom)
    internships = db(db.professional_experience.intern == True).select(
        orderby=~db.professional_experience.datefrom)
    volunteering = db(db.volunteer_experience).select(
        orderby=~db.volunteer_experience.datefrom)
    organizations = db(db.organization).select()
    diplomas = db(db.diploma).select(orderby=~db.diploma.datefrom)
    # Bucket technical and communication experience by their type constants.
    technical = {kind: db(db.technical_experience.tech_type == kind).select()
                 for kind in TECH_TYPE}
    communications = {kind: db(db.communication_experience.comm_type == kind).select()
                      for kind in COMM_TYPE}
    return dict(projects=projects, posts=posts, courses=courses, jobs=jobs,
                internships=internships, volunteering=volunteering,
                technical=technical, communications=communications,
                organizations=organizations, diplomas=diplomas)
|
{
"content_hash": "d745c327f9150fad192857eb1169d37c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 218,
"avg_line_length": 60.73684210526316,
"alnum_prop": 0.733102253032929,
"repo_name": "highlanderkev/kpw.org",
"id": "032b727bb310cb753b39525d75a9148ee0c53a0e",
"size": "1420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kpw-org/controllers/cv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "118673"
},
{
"name": "HTML",
"bytes": "66391"
},
{
"name": "JavaScript",
"bytes": "32484"
},
{
"name": "Python",
"bytes": "265721"
}
],
"symlink_target": ""
}
|
import atexit
import telemetry.internal.platform.power_monitor as power_monitor
def _ReenableChargingIfNeeded(battery):
if not battery.GetCharging():
battery.TieredSetCharging(True)
class PowerMonitorController(power_monitor.PowerMonitor):
  """
  PowerMonitor facade over an ordered list of PowerMonitor objects.

  Delegates every operation to the first monitor in the list that reports
  it can monitor power.  Registers an atexit hook so charging is restored
  even if the process dies mid-measurement.
  """
  def __init__(self, power_monitors, battery):
    super(PowerMonitorController, self).__init__()
    self._cascading_power_monitors = power_monitors
    self._active_monitor = None
    self._battery = battery
    atexit.register(_ReenableChargingIfNeeded, self._battery)

  def _AsyncPowerMonitor(self):
    """Return the first capable monitor, or None if none can monitor."""
    for candidate in self._cascading_power_monitors:
      if candidate.CanMonitorPower():
        return candidate
    return None

  def CanMonitorPower(self):
    found = self._AsyncPowerMonitor()
    return bool(found)

  def StartMonitoringPower(self, browser):
    self._active_monitor = self._AsyncPowerMonitor()
    assert self._active_monitor, 'No available monitor.'
    self._active_monitor.StartMonitoringPower(browser)

  def StopMonitoringPower(self):
    assert self._active_monitor, 'StartMonitoringPower() not called.'
    try:
      return self._active_monitor.StopMonitoringPower()
    finally:
      # Clear the handle even if the underlying stop raises.
      self._active_monitor = None
|
{
"content_hash": "1091e3348652b9f97b1a5713bee97407",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 31.95,
"alnum_prop": 0.729264475743349,
"repo_name": "googlearchive/big-rig",
"id": "31c98180e4bb16676db1aec65476961bef003725",
"size": "1441",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/src/thirdparty/telemetry/internal/platform/power_monitor/power_monitor_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "116452"
},
{
"name": "HTML",
"bytes": "50006"
},
{
"name": "JavaScript",
"bytes": "114907"
},
{
"name": "Python",
"bytes": "70340"
}
],
"symlink_target": ""
}
|
"""
Created on Wed Dec 7 12:43:48 2016
@author: cristinamenghini
"""
""" --------------------------------------------------------------------------
This script contains helper functions for the parser.
----------------------------------------------------------------------------"""
# Import useful libraries
import re
import json
def find_match(list_prova, string):
    """Flatten the contents of a "text" tag and search it for *string*.

    Returns (normalized_text, match): the joined, newline-free article text
    and the re.Match object (or None) from a case-insensitive search.

    @list_prova: list of the elements present in the tag "text"
    @string: string to match in the text
    """
    # Join the tag pieces into one string and strip embedded newlines.
    mod_text = ' '.join(list_prova).replace('\n', ' ')
    # Case-insensitive search for the topic string.
    return mod_text, re.search(string, mod_text, flags=re.I)
def load_json(title, text_to_load, language, topic):
    """Append one article record to the per-language/topic corpus file.

    Creates the .json file if it does not exist and writes one JSON object
    per line ({'title': ..., 'text': ...}).

    @title: name of the article
    @text_to_load: text of the article
    @language: language of the article
    @topic: string (i.e. word, regular expression)
    """
    # Make the topic filesystem-friendly.
    topic = topic.replace(' ','_')
    record = json.dumps({'title': title, 'text': text_to_load})
    # NOTE(review): the file is reopened on every call; with many articles
    # it would be faster to open once and close after all writes.
    with open('Corpus/wiki_' + language + '_' + topic + '.json', "a+") as json_file:
        # One JSON object per line.
        json_file.write("{}\n".format(record))
|
{
"content_hash": "15cd7cad090bc3a1a4f6267739967135",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 90,
"avg_line_length": 33.96551724137931,
"alnum_prop": 0.5903553299492386,
"repo_name": "CriMenghini/Wikipedia",
"id": "dc87db6d7f1d8b4aa6aee9b91f405c057652c344",
"size": "1994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Mention/helpers_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "58709"
},
{
"name": "Python",
"bytes": "18260"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from dashboard import views
# Route the dashboard root to the class-based IndexView.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# modern Django expects a plain list of url()/path() entries.
urlpatterns = patterns(
    '',
    url(r'^$', views.IndexView.as_view(), name='index'),
)
|
{
"content_hash": "5426de3a7bea955016f5963afe05ed0f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 21.625,
"alnum_prop": 0.6820809248554913,
"repo_name": "simonv3/django-gulp-mithril-starter",
"id": "7f51e72bcd4460db0a14157224de7f5cae9ef07d",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14592"
},
{
"name": "HTML",
"bytes": "5404"
},
{
"name": "JavaScript",
"bytes": "14632"
},
{
"name": "Python",
"bytes": "6112"
}
],
"symlink_target": ""
}
|
from ducktape.mark import parametrize
from ducktape.mark.resource import cluster
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.streams import StreamsEosTestDriverService, StreamsEosTestJobRunnerService, \
StreamsComplexEosTestJobRunnerService, StreamsEosTestVerifyRunnerService, StreamsComplexEosTestVerifyRunnerService
class StreamsEosTest(KafkaTest):
    """
    Test of Kafka Streams exactly-once semantics
    """
    def __init__(self, test_context):
        # Three brokers so replication-factor-2 topics survive a single
        # broker outage during the rebalance/failure scenarios.
        super(StreamsEosTest, self).__init__(test_context, num_zk=1, num_brokers=3, topics={
            'data': {'partitions': 5, 'replication-factor': 2},
            'echo': {'partitions': 5, 'replication-factor': 2},
            'min': {'partitions': 5, 'replication-factor': 2},
            'sum': {'partitions': 5, 'replication-factor': 2},
            'repartition': {'partitions': 5, 'replication-factor': 2},
            'max': {'partitions': 5, 'replication-factor': 2},
            'cnt': {'partitions': 5, 'replication-factor': 2}
        })
        self.driver = StreamsEosTestDriverService(test_context, self.kafka)
        self.test_context = test_context
    @cluster(num_nodes=9)
    @parametrize(processing_guarantee="exactly_once")
    @parametrize(processing_guarantee="exactly_once_beta")
    def test_rebalance_simple(self, processing_guarantee):
        """Clean-shutdown rebalances with three simple EOS clients."""
        self.run_rebalance(StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsEosTestVerifyRunnerService(self.test_context, self.kafka))
    @cluster(num_nodes=9)
    @parametrize(processing_guarantee="exactly_once")
    @parametrize(processing_guarantee="exactly_once_beta")
    def test_rebalance_complex(self, processing_guarantee):
        """Clean-shutdown rebalances with three complex-topology EOS clients."""
        self.run_rebalance(StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsComplexEosTestVerifyRunnerService(self.test_context, self.kafka))
    def run_rebalance(self, processor1, processor2, processor3, verifier):
        """
        Starts and stops two test clients a few times.
        Ensure that all records are delivered exactly-once.
        """
        self.driver.start()
        self.add_streams(processor1)
        # Keep processor1's state dir across restarts within this scenario.
        processor1.clean_node_enabled = False
        self.add_streams2(processor1, processor2)
        self.add_streams3(processor1, processor2, processor3)
        self.stop_streams3(processor2, processor3, processor1)
        self.add_streams3(processor2, processor3, processor1)
        self.stop_streams3(processor1, processor3, processor2)
        self.stop_streams2(processor1, processor3)
        self.stop_streams(processor1)
        processor1.clean_node_enabled = True
        self.driver.stop()
        verifier.start()
        verifier.wait()
        # Verifier prints ALL-RECORDS-DELIVERED only on an exactly-once run.
        verifier.node.account.ssh("grep ALL-RECORDS-DELIVERED %s" % verifier.STDOUT_FILE, allow_fail=False)
    @cluster(num_nodes=9)
    @parametrize(processing_guarantee="exactly_once")
    @parametrize(processing_guarantee="exactly_once_beta")
    def test_failure_and_recovery(self, processing_guarantee):
        """Hard-kill/restart cycles with three simple EOS clients."""
        self.run_failure_and_recovery(StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsEosTestVerifyRunnerService(self.test_context, self.kafka))
    @cluster(num_nodes=9)
    @parametrize(processing_guarantee="exactly_once")
    @parametrize(processing_guarantee="exactly_once_beta")
    def test_failure_and_recovery_complex(self, processing_guarantee):
        """Hard-kill/restart cycles with three complex-topology EOS clients."""
        self.run_failure_and_recovery(StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsComplexEosTestVerifyRunnerService(self.test_context, self.kafka))
    def run_failure_and_recovery(self, processor1, processor2, processor3, verifier):
        """
        Starts two test clients, then abort (kill -9) and restart them a few times.
        Ensure that all records are delivered exactly-once.
        """
        self.driver.start()
        self.add_streams(processor1)
        # Keep processor1's state dir across restarts within this scenario.
        processor1.clean_node_enabled = False
        self.add_streams2(processor1, processor2)
        self.add_streams3(processor1, processor2, processor3)
        self.abort_streams(processor2, processor3, processor1)
        self.add_streams3(processor2, processor3, processor1)
        self.abort_streams(processor2, processor3, processor1)
        self.add_streams3(processor2, processor3, processor1)
        self.abort_streams(processor1, processor3, processor2)
        self.stop_streams2(processor1, processor3)
        self.stop_streams(processor1)
        processor1.clean_node_enabled = True
        self.driver.stop()
        verifier.start()
        verifier.wait()
        # Verifier prints ALL-RECORDS-DELIVERED only on an exactly-once run.
        verifier.node.account.ssh("grep ALL-RECORDS-DELIVERED %s" % verifier.STDOUT_FILE, allow_fail=False)
    def add_streams(self, processor):
        """Start *processor* and wait until it is RUNNING and processing."""
        with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor:
            processor.start()
            self.wait_for_startup(monitor, processor)
    def add_streams2(self, running_processor, processor_to_be_started):
        """Start a second client and wait for the running one to re-stabilize."""
        with running_processor.node.account.monitor_log(running_processor.STDOUT_FILE) as monitor:
            self.add_streams(processor_to_be_started)
            self.wait_for_startup(monitor, running_processor)
    def add_streams3(self, running_processor1, running_processor2, processor_to_be_started):
        """Start a third client and wait for both running ones to re-stabilize."""
        with running_processor1.node.account.monitor_log(running_processor1.STDOUT_FILE) as monitor:
            self.add_streams2(running_processor2, processor_to_be_started)
            self.wait_for_startup(monitor, running_processor1)
    def stop_streams(self, processor_to_be_stopped):
        """Cleanly stop a client and wait for its full shutdown."""
        with processor_to_be_stopped.node.account.monitor_log(processor_to_be_stopped.STDOUT_FILE) as monitor2:
            processor_to_be_stopped.stop()
            self.wait_for(monitor2, processor_to_be_stopped, "StateChange: PENDING_SHUTDOWN -> NOT_RUNNING")
    def stop_streams2(self, keep_alive_processor, processor_to_be_stopped):
        """Stop one client and wait for the surviving one to re-stabilize."""
        with keep_alive_processor.node.account.monitor_log(keep_alive_processor.STDOUT_FILE) as monitor:
            self.stop_streams(processor_to_be_stopped)
            self.wait_for_startup(monitor, keep_alive_processor)
    def stop_streams3(self, keep_alive_processor1, keep_alive_processor2, processor_to_be_stopped):
        """Stop one client and wait for both survivors to re-stabilize."""
        with keep_alive_processor1.node.account.monitor_log(keep_alive_processor1.STDOUT_FILE) as monitor:
            self.stop_streams2(keep_alive_processor2, processor_to_be_stopped)
            self.wait_for_startup(monitor, keep_alive_processor1)
    def abort_streams(self, keep_alive_processor1, keep_alive_processor2, processor_to_be_aborted):
        """Hard-kill (no clean shutdown) one client; wait for survivors."""
        with keep_alive_processor1.node.account.monitor_log(keep_alive_processor1.STDOUT_FILE) as monitor1:
            with keep_alive_processor2.node.account.monitor_log(keep_alive_processor2.STDOUT_FILE) as monitor2:
                # stop_nodes(False) skips clean shutdown, i.e. kill -9.
                processor_to_be_aborted.stop_nodes(False)
                self.wait_for_startup(monitor2, keep_alive_processor2)
            self.wait_for_startup(monitor1, keep_alive_processor1)
    def wait_for_startup(self, monitor, processor):
        """Block until *processor* is RUNNING and has processed records."""
        self.wait_for(monitor, processor, "StateChange: REBALANCING -> RUNNING")
        self.wait_for(monitor, processor, "processed [0-9]* records from topic")
    def wait_for(self, monitor, processor, output):
        """Block until *output* (a regex) appears in the processor's log."""
        monitor.wait_until(output,
                           timeout_sec=300,
                           err_msg=("Never saw output '%s' on " % output) + str(processor.node.account))
|
{
"content_hash": "58a4c38763100fd2d82745bd4a52a905",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 129,
"avg_line_length": 54.44654088050314,
"alnum_prop": 0.684301721150514,
"repo_name": "Chasego/kafka",
"id": "bf07cb41062464b2a4383cabf293659a614c641b",
"size": "9438",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/tests/streams/streams_eos_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "19756"
},
{
"name": "HTML",
"bytes": "5443"
},
{
"name": "Java",
"bytes": "1713663"
},
{
"name": "Python",
"bytes": "371085"
},
{
"name": "Scala",
"bytes": "2411268"
},
{
"name": "Shell",
"bytes": "85116"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
}
|
from .statement import Statement
from . import _import
class AssumeStmt(Statement):
    """AST node for an assume(expr) statement."""
    def __init__(self, kwargs=None):
        """Build from a parsed-JSON dict; 'expr' holds the assumed Expression."""
        # BUG FIX: the default was a mutable `{}` shared across all calls;
        # a per-call empty dict is backward compatible and safe.
        kwargs = {} if kwargs is None else kwargs
        super(AssumeStmt, self).__init__(kwargs)
        locs = _import()
        # Expression expr: instantiate the node class named by its '@t' tag.
        expr = kwargs.get(u'expr', {})
        self._expr = locs[expr[u'@t']](expr) if expr else None
        self.add_as_parent([self.expr])
    @property
    def expr(self): return self._expr
    @expr.setter
    def expr(self, v): self._expr = v
|
{
"content_hash": "7fdb61c1a9a02fa7c41ef9b072525e1f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 62,
"avg_line_length": 24.9,
"alnum_prop": 0.5602409638554217,
"repo_name": "plum-umd/java-sketch",
"id": "ba230b45682c94a66841e2a7ac5503b31f53fe21",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jskparser/ast/stmt/assumestmt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "44034"
},
{
"name": "Java",
"bytes": "5042035"
},
{
"name": "Makefile",
"bytes": "215"
},
{
"name": "Perl",
"bytes": "495"
},
{
"name": "Python",
"bytes": "600201"
},
{
"name": "Shell",
"bytes": "46731"
}
],
"symlink_target": ""
}
|
from django.forms import ModelForm, Textarea
from questions.models import Answer, Question
class AnswerForm(ModelForm):
    """ModelForm for posting an Answer; only the text body is user-editable."""
    class Meta:
        model = Answer
        fields = ['answer_text']
        widgets = {
            # Fixed-size textarea; 'required' is also enforced client-side.
            'answer_text': Textarea(attrs={'id': 'answer-text', 'required': 'True', 'cols': 70, 'rows': 10}),
        }
class QuestionForm(ModelForm):
    """ModelForm for creating a Question (title and body only)."""
    class Meta:
        model = Question
        fields = ['title', 'question']
    # initial for function get_initial() in views, class QuestionCreate
    """def __init__(self, *args, **kwargs):
        self.owner = kwargs['initial']['owner']
        super(QuestionForm, self).__init__(*args, **kwargs)
    def save(self, commit=True):
        obj = super(QuestionForm, self).save(False)
        obj.owner = self.owner
        commit and obj.save()
        return obj"""
|
{
"content_hash": "9ef56c2e209246a98fb594d32008ad7f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 109,
"avg_line_length": 29.03448275862069,
"alnum_prop": 0.5973871733966746,
"repo_name": "Kuzenkov/SimpleAnalogueStackOverflow",
"id": "f488662d4ab99a441bc65fe151fe70bf197c711c",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12983"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "20568"
}
],
"symlink_target": ""
}
|
import os
from IPython.lib import passwd
#c = c # pylint:disable=undefined-variable
# get_config() is injected by Jupyter/IPython when this config file loads.
c = get_config()
# Listen on all interfaces (container use); port overridable via $PORT.
c.NotebookApp.ip = '0.0.0.0'
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False
# sets a password if PASSWORD is set in the environment
if 'PASSWORD' in os.environ:
    password = os.environ['PASSWORD']
    if password:
        c.NotebookApp.password = passwd(password)
    else:
        # An explicitly empty PASSWORD disables both password and token auth.
        c.NotebookApp.password = ''
        c.NotebookApp.token = ''
    # Scrub the secret so child processes cannot read it from the env.
    del os.environ['PASSWORD']
|
{
"content_hash": "fe6eef99544b07b222849d898fbf7d9e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.7068273092369478,
"repo_name": "mlperf/training_results_v0.7",
"id": "bd5494812303bee0471d0d7d0a4d259da6fbed4e",
"size": "498",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/maskrcnn/implementations/implementation_closed/docker/docker-jupyter/jupyter_notebook_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
}
|
"""Undocumented Module"""
__all__ = ['SfxPlayer']
import math
from panda3d.core import *
class SfxPlayer:
    """
    Play sound effects, potentially localized.

    Volume is attenuated by the distance between the sound's node and a
    listener node (``base.cam`` by default), either linearly or with an
    inverse-square falloff depending on ``UseInverseSquare``.
    """

    # When true, volume falls off with the inverse square of the distance
    # (volume = 1/(distance^2)); otherwise it falls off linearly.
    UseInverseSquare = 0

    def __init__(self):
        # This is the volume at which a sound is nearly inaudible; sounds
        # beyond the cutoff distance play at volume 0.
        self.cutoffVolume = .02

        # Cutoff for inverse-square attenuation.
        if SfxPlayer.UseInverseSquare:
            self.setCutoffDistance(300.0)
        else:
            # Distance at which sounds can no longer be heard.
            # This was determined experimentally.
            self.setCutoffDistance(120.0)

    def setCutoffDistance(self, d):
        """Set the distance beyond which sounds are considered inaudible."""
        self.cutoffDistance = d

        # This is the 'raw' distance at which the volume of a sound will
        # be equal to the cutoff volume: 1/(rawCutoffDistance^2) == cutoffVolume.
        rawCutoffDistance = math.sqrt(1. / self.cutoffVolume)

        # This is a scale factor to convert distances so that a sound
        # located at self.cutoffDistance will have a volume
        # of self.cutoffVolume under the inverse-square model.
        self.distanceScale = rawCutoffDistance / self.cutoffDistance

    def getCutoffDistance(self):
        """Return the current cutoff distance."""
        return self.cutoffDistance

    def getLocalizedVolume(self, node, listenerNode=None, cutoff=None):
        """
        Get the volume that a sound should be played at if it is
        localized at this node.  We compute this wrt the camera
        or to listenerNode.

        BUG FIX: ``cutoff`` now defaults to ``self.cutoffDistance``.
        Previously a None cutoff compared ``d > None``, which silenced
        every sound on Python 2 and raises TypeError on Python 3.
        """
        if cutoff is None:
            cutoff = self.cutoffDistance
        d = None
        if not node.isEmpty():
            if listenerNode and not listenerNode.isEmpty():
                d = node.getDistance(listenerNode)
            else:
                # base.cam is the global default listener node.
                d = node.getDistance(base.cam)
        if d is None or d > cutoff:
            volume = 0
        elif SfxPlayer.UseInverseSquare:
            sd = d * self.distanceScale
            # 'or 1' guards against division by zero at distance 0.
            volume = min(1, 1 / (sd * sd or 1))
        else:
            # Linear falloff; 'or 1' guards against a zero cutoff.
            volume = 1 - (d / (cutoff or 1))
        return volume

    def playSfx(
            self, sfx, looping=0, interrupt=1, volume=None,
            time=0.0, node=None, listenerNode=None, cutoff=None):
        """Play ``sfx``, localized at ``node`` when one is given."""
        if sfx:
            if not cutoff:
                cutoff = self.cutoffDistance
            self.setFinalVolume(sfx, node, volume, listenerNode, cutoff)

            # Don't start over if it's already playing, unless
            # "interrupt" was specified.
            if interrupt or (sfx.status() != AudioSound.PLAYING):
                sfx.setTime(time)
                sfx.setLoop(looping)
                sfx.play()

    def setFinalVolume(self, sfx, node, volume, listenerNode, cutoff=None):
        """Calculate the final volume based on all contributed factors."""
        # If we have either a node or a volume, we need to adjust the sfx.
        # The volume passed in multiplies the distance-based volume.
        if node or (volume is not None):
            if node:
                finalVolume = self.getLocalizedVolume(node, listenerNode, cutoff)
            else:
                finalVolume = 1
            if volume is not None:
                finalVolume *= volume
            if node is not None:
                finalVolume *= node.getNetAudioVolume()
            sfx.setVolume(finalVolume)
|
{
"content_hash": "2d5c5671a129d2e2fd0a6d5c68626c92",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 81,
"avg_line_length": 34.63,
"alnum_prop": 0.579844065838868,
"repo_name": "mgracer48/panda3d",
"id": "2fb97eae322f4173834e41348872d1d48afae566",
"size": "3463",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "direct/src/showbase/SfxPlayer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "6395157"
},
{
"name": "C++",
"bytes": "31241851"
},
{
"name": "Emacs Lisp",
"bytes": "166274"
},
{
"name": "Groff",
"bytes": "3106"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "NSIS",
"bytes": "91955"
},
{
"name": "Nemerle",
"bytes": "4403"
},
{
"name": "Objective-C",
"bytes": "30065"
},
{
"name": "Objective-C++",
"bytes": "300394"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl6",
"bytes": "30636"
},
{
"name": "Puppet",
"bytes": "2627"
},
{
"name": "Python",
"bytes": "5530563"
},
{
"name": "Rebol",
"bytes": "421"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
}
|
from marshmallow import EXCLUDE, fields, validate
from polyaxon.api import STATIC_V1
from polyaxon.contexts import paths as ctx_paths
from polyaxon.env_vars.keys import (
EV_KEYS_ARCHIVE_ROOT,
EV_KEYS_DNS_BACKEND,
EV_KEYS_DNS_CUSTOM_CLUSTER,
EV_KEYS_DNS_PREFIX,
EV_KEYS_DNS_USE_RESOLVER,
EV_KEYS_K8S_NAMESPACE,
EV_KEYS_LOG_LEVEL,
EV_KEYS_NGINX_INDENT_CHAR,
EV_KEYS_NGINX_INDENT_WIDTH,
EV_KEYS_NGINX_TIMEOUT,
EV_KEYS_PROXY_API_HOST,
EV_KEYS_PROXY_API_PORT,
EV_KEYS_PROXY_API_TARGET_PORT,
EV_KEYS_PROXY_API_USE_RESOLVER,
EV_KEYS_PROXY_AUTH_ENABLED,
EV_KEYS_PROXY_AUTH_EXTERNAL,
EV_KEYS_PROXY_AUTH_USE_RESOLVER,
EV_KEYS_PROXY_FORWARD_PROXY_HOST,
EV_KEYS_PROXY_FORWARD_PROXY_KIND,
EV_KEYS_PROXY_FORWARD_PROXY_PORT,
EV_KEYS_PROXY_GATEWAY_HOST,
EV_KEYS_PROXY_GATEWAY_PORT,
EV_KEYS_PROXY_GATEWAY_TARGET_PORT,
EV_KEYS_PROXY_HAS_FORWARD_PROXY,
EV_KEYS_PROXY_NAMESPACES,
EV_KEYS_PROXY_SERVICES_PORT,
EV_KEYS_PROXY_SSL_ENABLED,
EV_KEYS_PROXY_SSL_PATH,
EV_KEYS_PROXY_STREAMS_HOST,
EV_KEYS_PROXY_STREAMS_PORT,
EV_KEYS_PROXY_STREAMS_TARGET_PORT,
EV_KEYS_STATIC_ROOT,
EV_KEYS_STATIC_URL,
EV_KEYS_UI_ADMIN_ENABLED,
)
from polyaxon.schemas.base import BaseConfig, BaseSchema
class ProxiesSchema(BaseSchema):
    # Marshmallow schema for Polyaxon proxy settings; every field is
    # optional and maps (via data_key) to an environment-variable name.
    namespace = fields.Str(allow_none=True, data_key=EV_KEYS_K8S_NAMESPACE)
    # NOTE(review): the sibling `namespace` field is a Str, and Kubernetes
    # namespaces are names, so fields.Int() inside this list looks
    # suspicious -- confirm against the values actually stored under
    # EV_KEYS_PROXY_NAMESPACES.
    namespaces = fields.List(
        fields.Int(), allow_none=True, data_key=EV_KEYS_PROXY_NAMESPACES
    )
    gateway_port = fields.Int(allow_none=True, data_key=EV_KEYS_PROXY_GATEWAY_PORT)
    gateway_target_port = fields.Int(
        allow_none=True, data_key=EV_KEYS_PROXY_GATEWAY_TARGET_PORT
    )
    gateway_host = fields.Str(allow_none=True, data_key=EV_KEYS_PROXY_GATEWAY_HOST)
    streams_port = fields.Int(allow_none=True, data_key=EV_KEYS_PROXY_STREAMS_PORT)
    streams_target_port = fields.Int(
        allow_none=True, data_key=EV_KEYS_PROXY_STREAMS_TARGET_PORT
    )
    streams_host = fields.Str(allow_none=True, data_key=EV_KEYS_PROXY_STREAMS_HOST)
    api_port = fields.Int(allow_none=True, data_key=EV_KEYS_PROXY_API_PORT)
    api_target_port = fields.Int(
        allow_none=True, data_key=EV_KEYS_PROXY_API_TARGET_PORT
    )
    api_host = fields.Str(allow_none=True, data_key=EV_KEYS_PROXY_API_HOST)
    api_use_resolver = fields.Bool(
        allow_none=True, data_key=EV_KEYS_PROXY_API_USE_RESOLVER
    )
    # NOTE(review): every other *_port field is fields.Int; Str here may be
    # intentional (e.g. port ranges) but looks inconsistent -- verify.
    services_port = fields.Str(allow_none=True, data_key=EV_KEYS_PROXY_SERVICES_PORT)
    auth_enabled = fields.Bool(allow_none=True, data_key=EV_KEYS_PROXY_AUTH_ENABLED)
    auth_external = fields.Str(allow_none=True, data_key=EV_KEYS_PROXY_AUTH_EXTERNAL)
    auth_use_resolver = fields.Bool(
        allow_none=True, data_key=EV_KEYS_PROXY_AUTH_USE_RESOLVER
    )
    ssl_enabled = fields.Bool(allow_none=True, data_key=EV_KEYS_PROXY_SSL_ENABLED)
    ssl_path = fields.Str(allow_none=True, data_key=EV_KEYS_PROXY_SSL_PATH)
    dns_use_resolver = fields.Bool(allow_none=True, data_key=EV_KEYS_DNS_USE_RESOLVER)
    dns_custom_cluster = fields.Str(
        allow_none=True, data_key=EV_KEYS_DNS_CUSTOM_CLUSTER
    )
    dns_backend = fields.Str(allow_none=True, data_key=EV_KEYS_DNS_BACKEND)
    dns_prefix = fields.Str(allow_none=True, data_key=EV_KEYS_DNS_PREFIX)
    log_level = fields.Str(allow_none=True, data_key=EV_KEYS_LOG_LEVEL)
    nginx_timeout = fields.Int(allow_none=True, data_key=EV_KEYS_NGINX_TIMEOUT)
    nginx_indent_char = fields.Str(allow_none=True, data_key=EV_KEYS_NGINX_INDENT_CHAR)
    nginx_indent_width = fields.Int(
        allow_none=True, data_key=EV_KEYS_NGINX_INDENT_WIDTH
    )
    archive_root = fields.Str(allow_none=True, data_key=EV_KEYS_ARCHIVE_ROOT)
    static_root = fields.Str(allow_none=True, data_key=EV_KEYS_STATIC_ROOT)
    static_url = fields.Str(allow_none=True, data_key=EV_KEYS_STATIC_URL)
    ui_admin_enabled = fields.Bool(allow_none=True, data_key=EV_KEYS_UI_ADMIN_ENABLED)
    has_forward_proxy = fields.Bool(
        allow_none=True, data_key=EV_KEYS_PROXY_HAS_FORWARD_PROXY
    )
    forward_proxy_port = fields.Int(
        allow_none=True, data_key=EV_KEYS_PROXY_FORWARD_PROXY_PORT
    )
    forward_proxy_host = fields.Str(
        allow_none=True, data_key=EV_KEYS_PROXY_FORWARD_PROXY_HOST
    )
    # Only two proxy modes are accepted; anything else fails validation.
    forward_proxy_kind = fields.Str(
        allow_none=True,
        data_key=EV_KEYS_PROXY_FORWARD_PROXY_KIND,
        validate=validate.OneOf(["transparent", "connect"]),
    )

    @staticmethod
    def schema_config():
        # Links this schema to its config class for BaseSchema machinery.
        return ProxiesConfig
class ProxiesConfig(BaseConfig):
    """Configuration object for Polyaxon proxy settings.

    Pairs with `ProxiesSchema`; materializes every env-var-backed setting
    and applies in-code defaults for any value that was not provided.
    Unknown keys are excluded rather than raising.
    """

    SCHEMA = ProxiesSchema
    IDENTIFIER = "proxies"
    UNKNOWN_BEHAVIOUR = EXCLUDE
    # Attributes omitted from the serialized form when unset.
    REDUCED_ATTRIBUTES = [
        EV_KEYS_PROXY_GATEWAY_PORT,
        EV_KEYS_PROXY_GATEWAY_TARGET_PORT,
        EV_KEYS_PROXY_GATEWAY_HOST,
        EV_KEYS_PROXY_NAMESPACES,
        EV_KEYS_PROXY_STREAMS_PORT,
        EV_KEYS_PROXY_STREAMS_TARGET_PORT,
        EV_KEYS_PROXY_STREAMS_HOST,
        EV_KEYS_PROXY_API_PORT,
        EV_KEYS_PROXY_API_TARGET_PORT,
        EV_KEYS_PROXY_API_HOST,
        EV_KEYS_PROXY_API_USE_RESOLVER,
        EV_KEYS_PROXY_SERVICES_PORT,
        EV_KEYS_PROXY_SSL_ENABLED,
        EV_KEYS_PROXY_SSL_PATH,
        EV_KEYS_PROXY_AUTH_ENABLED,
        EV_KEYS_PROXY_AUTH_EXTERNAL,
        EV_KEYS_PROXY_AUTH_USE_RESOLVER,
        EV_KEYS_DNS_USE_RESOLVER,
        EV_KEYS_DNS_CUSTOM_CLUSTER,
        EV_KEYS_DNS_BACKEND,
        EV_KEYS_DNS_PREFIX,
        EV_KEYS_NGINX_TIMEOUT,
        EV_KEYS_NGINX_INDENT_CHAR,
        EV_KEYS_NGINX_INDENT_WIDTH,
        EV_KEYS_K8S_NAMESPACE,
        EV_KEYS_LOG_LEVEL,
        EV_KEYS_ARCHIVE_ROOT,
        EV_KEYS_STATIC_ROOT,
        EV_KEYS_STATIC_URL,
        EV_KEYS_UI_ADMIN_ENABLED,
        EV_KEYS_PROXY_HAS_FORWARD_PROXY,
        EV_KEYS_PROXY_FORWARD_PROXY_PORT,
        EV_KEYS_PROXY_FORWARD_PROXY_HOST,
        EV_KEYS_PROXY_FORWARD_PROXY_KIND,
    ]

    def __init__(
        self,
        namespace=None,
        namespaces=None,
        auth_enabled=None,
        auth_external=None,
        auth_use_resolver=None,
        gateway_port=None,
        gateway_target_port=None,
        gateway_host=None,
        streams_port=None,
        streams_target_port=None,
        streams_host=None,
        api_port=None,
        api_target_port=None,
        api_host=None,
        api_use_resolver=None,
        services_port=None,
        dns_use_resolver=None,
        dns_custom_cluster=None,
        dns_backend=None,
        dns_prefix=None,
        nginx_timeout=None,
        nginx_indent_char=None,
        nginx_indent_width=None,
        log_level=None,
        ssl_enabled=None,
        ssl_path=None,
        archive_root=None,
        static_root=None,
        static_url=None,
        ui_admin_enabled=None,
        has_forward_proxy=None,
        forward_proxy_port=None,
        forward_proxy_host=None,
        forward_proxy_kind=None,
        **kwargs
    ):
        """Store all settings, substituting defaults for missing values.

        Booleans default to False (not None) so downstream truthiness
        checks always see a concrete value; ports and hosts fall back to
        the in-cluster service defaults.
        """
        self.namespace = namespace
        self.namespaces = namespaces
        self.auth_enabled = auth_enabled or False
        self.auth_external = auth_external
        self.auth_use_resolver = auth_use_resolver or False
        self.gateway_port = gateway_port or self.default_port
        self.gateway_target_port = gateway_target_port or self.default_target_port
        self.gateway_host = gateway_host or "polyaxon-polyaxon-gateway"
        self.streams_port = streams_port or self.default_port
        self.streams_target_port = streams_target_port or self.default_target_port
        self.streams_host = streams_host or "polyaxon-polyaxon-streams"
        self.api_port = api_port or self.default_port
        self.api_target_port = api_target_port or self.default_target_port
        self.api_host = api_host or "polyaxon-polyaxon-api"
        self.api_use_resolver = api_use_resolver or False
        self.services_port = services_port or self.default_port
        self.dns_use_resolver = dns_use_resolver or False
        self.dns_custom_cluster = dns_custom_cluster or "cluster.local"
        self.dns_backend = dns_backend or "kube-dns"
        self.dns_prefix = dns_prefix
        self.nginx_timeout = nginx_timeout or 650
        self.nginx_indent_char = nginx_indent_char or " "
        self.nginx_indent_width = nginx_indent_width or 4
        self.ssl_enabled = ssl_enabled or False
        # Default "warn", normalized to lower case in a single assignment
        # (previously two redundant statements with identical effect).
        self.log_level = (log_level or "warn").lower()
        self.ssl_path = ssl_path or "/etc/ssl/polyaxon"
        self.archive_root = archive_root or ctx_paths.CONTEXT_ARCHIVE_ROOT
        self.static_root = static_root or "/{}".format(STATIC_V1)
        self.static_url = static_url
        self.ui_admin_enabled = ui_admin_enabled
        self.has_forward_proxy = has_forward_proxy
        self.forward_proxy_port = forward_proxy_port
        self.forward_proxy_host = forward_proxy_host
        self.forward_proxy_kind = forward_proxy_kind

    @property
    def default_target_port(self):
        """Default container (target) port for proxied services."""
        return 8000

    @property
    def default_port(self):
        """Default externally exposed port for proxied services."""
        return 80
|
{
"content_hash": "d707268d332559fe7718aae5a7776e43",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 87,
"avg_line_length": 38.98706896551724,
"alnum_prop": 0.6595909342177999,
"repo_name": "polyaxon/polyaxon",
"id": "537871146857331f7bae8d83bf3fabed2198dd29",
"size": "9650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/polyaxon/schemas/cli/proxies_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
}
|
import anki

# Use the print() function so this script runs on both Python 2.7 and
# Python 3 (the original bare ``print`` statement is a SyntaxError on 3).
print(anki.__all__)
|
{
"content_hash": "c13584f2bfcdafaa59de2cb69168d82e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 18,
"avg_line_length": 10.666666666666666,
"alnum_prop": 0.6875,
"repo_name": "jlitven/vexer",
"id": "6abb516e794fac0fbca1c05d07bbbeb84fac970b",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/anki/__main__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "761396"
}
],
"symlink_target": ""
}
|
"""
Support for Ankuoo RecSwitch MS6126 devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.recswitch/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Version-pinned library dependency installed by Home Assistant.
REQUIREMENTS = ['pyrecswitch==1.0.2']

# Fallback entity name; formatted with the device MAC address.
DEFAULT_NAME = 'RecSwitch {0}'

# Key under hass.data holding the shared RSNetwork instance.
DATA_RSN = 'RSN'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    # MAC is upper-cased on input so it can serve as a stable unique ID.
    vol.Required(CONF_MAC): vol.All(cv.string, vol.Upper),
    vol.Optional(CONF_NAME): cv.string,
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the RecSwitch switch platform."""
    from pyrecswitch import RSNetwork

    name = config.get(CONF_NAME)
    address = config[CONF_MAC]
    ip = config[CONF_HOST]

    # Lazily create the shared RecSwitch network and start its UDP
    # endpoint the first time this platform is set up.
    network = hass.data.get(DATA_RSN)
    if not network:
        network = RSNetwork()
        hass.data[DATA_RSN] = network
        hass.async_create_task(
            network.create_datagram_endpoint(loop=hass.loop))

    device = hass.data[DATA_RSN].register_device(address, ip)
    async_add_entities([RecSwitchSwitch(device, name, address)])
class RecSwitchSwitch(SwitchDevice):
    """Representation of a recswitch device."""

    def __init__(self, device, device_name, mac_address):
        """Initialize a recswitch device."""
        self.gpio_state = False
        self.device = device
        self.mac_address = mac_address
        # Fall back to a name derived from the MAC when none was given.
        self.device_name = device_name or DEFAULT_NAME.format(mac_address)

    @property
    def unique_id(self):
        """Return the switch unique ID."""
        return self.mac_address

    @property
    def name(self):
        """Return the switch name."""
        return self.device_name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self.gpio_state

    async def async_turn_on(self, **kwargs):
        """Turn on the switch."""
        await self.async_set_gpio_status(True)

    async def async_turn_off(self, **kwargs):
        """Turn off the switch."""
        await self.async_set_gpio_status(False)

    async def async_set_gpio_status(self, status):
        """Set the switch status."""
        from pyrecswitch import RSNetworkError

        try:
            response = await self.device.set_gpio_status(status)
            self.gpio_state = response.state
        except RSNetworkError as error:
            _LOGGER.error('Setting status to %s: %r', self.name, error)

    async def async_update(self):
        """Update the current switch status."""
        from pyrecswitch import RSNetworkError

        try:
            response = await self.device.get_gpio_status()
            self.gpio_state = response.state
        except RSNetworkError as error:
            _LOGGER.error('Reading status from %s: %r', self.name, error)
|
{
"content_hash": "75df31f03f7ec9d34da5ac6deb250d56",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 75,
"avg_line_length": 30.594059405940595,
"alnum_prop": 0.6469255663430421,
"repo_name": "tinloaf/home-assistant",
"id": "636c302cea118d227a179a3ed7c3b448d789653c",
"size": "3090",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/recswitch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
}
|
import sys

# BUG FIX: the original `from qt5-base-5.4.03 import *` is a SyntaxError
# (Python module names cannot contain hyphens or dots).  QApplication and
# QPushButton -- the only names this script uses -- live in PyQt5's
# QtWidgets module.
from PyQt5.QtWidgets import QApplication, QPushButton

app = QApplication(sys.argv)
button = QPushButton("Hello world0", None)
button.show()
app.exec_()
|
{
"content_hash": "0a6aaf01aebb77ec1a938bb5743badee",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 41,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.7372262773722628,
"repo_name": "WCP52/docs",
"id": "60fd90d44c3b329b2ca6f7a455e3562ca1c56a8a",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/demo/testgui.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "5142"
},
{
"name": "Makefile",
"bytes": "404460"
},
{
"name": "Matlab",
"bytes": "15656"
},
{
"name": "Python",
"bytes": "64617"
},
{
"name": "Shell",
"bytes": "2547"
},
{
"name": "TeX",
"bytes": "146963"
}
],
"symlink_target": ""
}
|
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `indicator_column`. `indicator_column` is recommended
for features with only a few possible values. For features with many
possible values, to reduce the size of your model, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosophy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building model using FeatureColumns, this can be used in a
`model_fn` which is given to the {tf.estimator.Estimator}:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import re
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
# TODO(b/118385027): Dependency on keras can be problematic if Keras moves out
# of the main repo.
from tensorflow.python.keras import initializers
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
# Date string attached to the old _FeatureColumn API deprecations;
# None presumably means no removal date is pinned yet -- confirm against
# the `deprecation` decorator usage elsewhere in this module.
_FEATURE_COLUMN_DEPRECATION_DATE = None
# Message shown when the legacy _FeatureColumn APIs are used.
_FEATURE_COLUMN_DEPRECATION = ('The old _FeatureColumn APIs are being '
                               'deprecated. Please use the new FeatureColumn '
                               'APIs instead.')
class StateManager(object):
  """Manages the state associated with FeatureColumns.

  Some `FeatureColumn`s create variables or resources to assist their
  computation. The `StateManager` is responsible for creating and storing
  these objects since `FeatureColumn`s are supposed to be stateless
  configuration only.

  Every method here is abstract: each raises NotImplementedError naming
  the method, so subclasses must override whatever they support.
  """

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    """Creates a new variable.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      trainable: Whether this variable is trainable or not.
      use_resource: If true, we use resource variables. Otherwise we use
        RefVariable.
      initializer: initializer instance (callable).

    Returns:
      The created variable.
    """
    del feature_column, name, shape, dtype, trainable, use_resource, initializer
    raise NotImplementedError('StateManager.create_variable')

  def add_variable(self, feature_column, var):
    """Adds an existing variable to the state.

    Args:
      feature_column: A `FeatureColumn` object to associate this variable
        with.
      var: The variable.
    """
    del feature_column, var
    raise NotImplementedError('StateManager.add_variable')

  def get_variable(self, feature_column, name):
    """Returns an existing variable.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: variable name.
    """
    del feature_column, name
    # BUG FIX: the message previously said 'StateManager.get_var', which
    # does not match this method's name and misleads callers debugging the
    # NotImplementedError.
    raise NotImplementedError('StateManager.get_variable')

  def add_resource(self, feature_column, name, resource):
    """Creates a new resource.

    Resources can be things such as tables etc.

    Args:
      feature_column: A `FeatureColumn` object this resource corresponds to.
      name: Name of the resource.
      resource: The resource.

    Returns:
      The created resource.
    """
    del feature_column, name, resource
    raise NotImplementedError('StateManager.add_resource')

  def get_resource(self, feature_column, name):
    """Returns an already created resource.

    Resources can be things such as tables etc.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: Name of the resource.
    """
    del feature_column, name
    raise NotImplementedError('StateManager.get_resource')
class _StateManagerImpl(StateManager):
  """Manages the state of DenseFeatures and LinearLayer.

  Stores per-column variables and resources in nested dicts keyed first by
  feature column, then by name.  Variable tracking on the owning layer is
  done manually because variable names are not guaranteed unique.
  """

  def __init__(self, layer, trainable):
    """Creates an _StateManagerImpl object.

    Args:
      layer: The input layer this state manager is associated with.
      trainable: Whether by default, variables created are trainable or not.
    """
    self._trainable = trainable
    self._layer = layer
    # Ensure the layer exposes a `_resources` list for add_resource below.
    if self._layer is not None and not hasattr(self._layer, '_resources'):
      self._layer._resources = []  # pylint: disable=protected-access
    # {feature_column: {name: variable}}
    self._cols_to_vars_map = collections.defaultdict(lambda: {})
    # TODO(vbardiovsky): Make sure the resources are tracked by moving them to
    # the layer (inheriting from AutoTrackable), e.g.:
    # self._layer._resources_map = data_structures.Mapping()
    self._cols_to_resources_map = collections.defaultdict(lambda: {})

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    # A given (column, name) pair may only be created once.
    if name in self._cols_to_vars_map[feature_column]:
      raise ValueError('Variable already exists.')

    # We explicitly track these variables since `name` is not guaranteed to be
    # unique and disable manual tracking that the add_weight call does.
    with trackable.no_manual_dependency_tracking_scope(self._layer):
      var = self._layer.add_weight(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          # A variable is trainable only if both the manager default and the
          # per-call flag allow it.
          trainable=self._trainable and trainable,
          use_resource=use_resource,
          # TODO(rohanj): Get rid of this hack once we have a mechanism for
          # specifying a default partitioner for an entire layer. In that case,
          # the default getter for Layers should work.
          getter=variable_scope.get_variable)
      if isinstance(var, variables.PartitionedVariable):
        # Track each shard separately, suffixing the name with the shard's
        # offset so the checkpoint keys stay distinct.
        for v in var:
          part_name = name + '/' + str(v._get_save_slice_info().var_offset[0])  # pylint: disable=protected-access
          self._layer._track_trackable(v, feature_column.name + '/' + part_name)  # pylint: disable=protected-access
      else:
        if isinstance(var, trackable.Trackable):
          self._layer._track_trackable(var, feature_column.name + '/' + name)  # pylint: disable=protected-access

      self._cols_to_vars_map[feature_column][name] = var
      return var

  def get_variable(self, feature_column, name):
    if name in self._cols_to_vars_map[feature_column]:
      return self._cols_to_vars_map[feature_column][name]
    raise ValueError('Variable does not exist.')

  def add_resource(self, feature_column, name, resource):
    self._cols_to_resources_map[feature_column][name] = resource
    # Also keep the resource alive on the layer (see TODO in __init__).
    if self._layer is not None:
      self._layer._resources.append(resource)  # pylint: disable=protected-access

  def get_resource(self, feature_column, name):
    if name in self._cols_to_resources_map[feature_column]:
      return self._cols_to_resources_map[feature_column][name]
    raise ValueError('Resource does not exist.')
class _StateManagerImplV2(_StateManagerImpl):
  """Manages the state of DenseFeatures.

  Identical to _StateManagerImpl except that create_variable does not force
  the legacy `variable_scope.get_variable` getter and has no special
  handling for partitioned variables.
  """

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    # A given (column, name) pair may only be created once.
    if name in self._cols_to_vars_map[feature_column]:
      raise ValueError('Variable already exists.')

    # We explicitly track these variables since `name` is not guaranteed to be
    # unique and disable manual tracking that the add_weight call does.
    with trackable.no_manual_dependency_tracking_scope(self._layer):
      var = self._layer.add_weight(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          # Trainable only if both the manager default and the per-call flag
          # allow it.
          trainable=self._trainable and trainable,
          use_resource=use_resource)
      if isinstance(var, trackable.Trackable):
        self._layer._track_trackable(var, feature_column.name + '/' + name)  # pylint: disable=protected-access
      self._cols_to_vars_map[feature_column][name] = var
      return var
class _BaseFeaturesLayer(Layer):
  """Base class for DenseFeatures and SequenceFeatures.

  Defines common methods and helpers.

  Args:
    feature_columns: An iterable containing the FeatureColumns to use as
      inputs to your model.
    expected_column_type: Expected class for provided feature columns.
    trainable: Boolean, whether the layer's variables will be updated via
      gradient descent during training.
    name: Name to give to the DenseFeatures.
    **kwargs: Keyword arguments to construct a layer.

  Raises:
    ValueError: if an item in `feature_columns` doesn't match
      `expected_column_type`.
  """

  def __init__(self,
               feature_columns,
               expected_column_type,
               trainable,
               name,
               partitioner=None,
               **kwargs):
    super(_BaseFeaturesLayer, self).__init__(
        name=name, trainable=trainable, **kwargs)
    self._feature_columns = _normalize_feature_columns(feature_columns)
    self._state_manager = _StateManagerImpl(self, self.trainable)
    self._partitioner = partitioner
    # Validate every column up front so bad inputs fail at construction.
    for column in self._feature_columns:
      if not isinstance(column, expected_column_type):
        raise ValueError(
            'Items of feature_columns must be a {}. '
            'You can wrap a categorical column with an '
            'embedding_column or indicator_column. Given: {}'.format(
                expected_column_type, column))

  def build(self, _):
    # Nested variable scopes propagate the partitioner and namespace each
    # column's state under a sanitized version of the column name.
    for column in self._feature_columns:
      with variable_scope._pure_variable_scope(  # pylint: disable=protected-access
          self.name,
          partitioner=self._partitioner):
        with variable_scope._pure_variable_scope(  # pylint: disable=protected-access
            _sanitize_column_name_for_variable_scope(column.name)):
          column.create_state(self._state_manager)
    super(_BaseFeaturesLayer, self).build(None)

  def _target_shape(self, input_shape, num_elements):
    """Computes expected output shape of the layer or a column's dense tensor.

    BUG FIX: this abstract hook was previously declared as `_output_shape`,
    but every call site in this class (`compute_output_shape`,
    `_process_dense_tensor`) invokes `self._target_shape`, so calling those
    methods raised AttributeError instead of NotImplementedError.  The hook
    is now declared under the name the call sites use; `_output_shape` is
    kept as a backward-compatible alias.

    Args:
      input_shape: Tensor or array with batch shape.
      num_elements: Size of the last dimension of the output.

    Returns:
      Tuple with output shape.
    """
    raise NotImplementedError('Calling an abstract method.')

  def _output_shape(self, input_shape, num_elements):
    """Deprecated alias for `_target_shape`; kept for backward compat."""
    return self._target_shape(input_shape, num_elements)

  def compute_output_shape(self, input_shape):
    # Output width is the sum of every column's flattened variable shape.
    total_elements = 0
    for column in self._feature_columns:
      total_elements += column.variable_shape.num_elements()
    return self._target_shape(input_shape, total_elements)

  def _process_dense_tensor(self, column, tensor):
    """Reshapes the dense tensor output of a column based on expected shape.

    Args:
      column: A DenseColumn or SequenceDenseColumn object.
      tensor: A dense tensor obtained from the same column.

    Returns:
      Reshaped dense tensor.
    """
    num_elements = column.variable_shape.num_elements()
    target_shape = self._target_shape(array_ops.shape(tensor), num_elements)
    return array_ops.reshape(tensor, shape=target_shape)

  def _verify_and_concat_tensors(self, output_tensors):
    """Verifies and concatenates the dense output of several columns."""
    _verify_static_batch_size_equality(output_tensors, self._feature_columns)
    return array_ops.concat(output_tensors, -1)

  def get_config(self):
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    column_configs = serialization.serialize_feature_columns(
        self._feature_columns)
    config = {'feature_columns': column_configs}
    config['partitioner'] = generic_utils.serialize_keras_object(
        self._partitioner)

    base_config = super(  # pylint: disable=bad-super-call
        _BaseFeaturesLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    config_cp = config.copy()
    config_cp['feature_columns'] = serialization.deserialize_feature_columns(
        config['feature_columns'], custom_objects=custom_objects)
    config_cp['partitioner'] = generic_utils.deserialize_keras_object(
        config['partitioner'], custom_objects)

    return cls(**config_cp)
class _LinearModelLayer(Layer):
  """Layer that contains logic for `LinearModel`.

  Creates one weight variable per feature column plus a shared bias, and
  computes the weighted sum (i.e. the logits) over all columns in `call`.
  """

  def __init__(self,
               feature_columns,
               units=1,
               sparse_combiner='sum',
               trainable=True,
               name=None,
               **kwargs):
    """Constructs the layer. See `LinearModel` for full argument semantics.

    Raises:
      ValueError: if an item in `feature_columns` is neither a `DenseColumn`
        nor a `CategoricalColumn`.
    """
    super(_LinearModelLayer, self).__init__(
        name=name, trainable=trainable, **kwargs)

    self._feature_columns = _normalize_feature_columns(feature_columns)
    for column in self._feature_columns:
      if not isinstance(column, (DenseColumn, CategoricalColumn)):
        raise ValueError(
            'Items of feature_columns must be either a '
            'DenseColumn or CategoricalColumn. Given: {}'.format(column))

    self._units = units
    self._sparse_combiner = sparse_combiner

    # Owns every variable created on behalf of the columns (both the
    # per-column state and the 'weights' variables created in `build`).
    self._state_manager = _StateManagerImpl(self, self.trainable)
    # Created lazily in `build`.
    self.bias = None

  def build(self, _):
    # We need variable scopes for now because we want the variable partitioning
    # information to percolate down. We also use _pure_variable_scope's here
    # since we want to open up a name_scope in the `call` method while creating
    # the ops.
    with variable_scope._pure_variable_scope(self.name):  # pylint: disable=protected-access
      for column in self._feature_columns:
        with variable_scope._pure_variable_scope(  # pylint: disable=protected-access
            _sanitize_column_name_for_variable_scope(column.name)):
          # Create the state for each feature column
          column.create_state(self._state_manager)

          # Create a weight variable for each column.
          # Categorical columns get one weight row per bucket; dense columns
          # get one row per element of their dense representation.
          if isinstance(column, CategoricalColumn):
            first_dim = column.num_buckets
          else:
            first_dim = column.variable_shape.num_elements()
          self._state_manager.create_variable(
              column,
              name='weights',
              dtype=dtypes.float32,
              shape=(first_dim, self._units),
              initializer=init_ops.zeros_initializer(),
              trainable=self.trainable)

      # Create a bias variable.
      self.bias = self.add_variable(
          name='bias_weights',
          dtype=dtypes.float32,
          shape=[self._units],
          initializer=init_ops.zeros_initializer(),
          trainable=self.trainable,
          use_resource=True,
          # TODO(rohanj): Get rid of this hack once we have a mechanism for
          # specifying a default partitioner for an entire layer. In that case,
          # the default getter for Layers should work.
          getter=variable_scope.get_variable)

    super(_LinearModelLayer, self).build(None)

  def call(self, features):
    """Computes the linear prediction (weighted sum plus bias).

    Args:
      features: A mapping from key to `Tensor` or `SparseTensor`, as looked
        up by the feature columns.

    Returns:
      A dense `Tensor` of shape (batch_size, units).

    Raises:
      ValueError: if `features` is not a dictionary.
    """
    if not isinstance(features, dict):
      raise ValueError('We expected a dictionary here. Instead we got: {}'
                       .format(features))
    with ops.name_scope(self.name):
      transformation_cache = FeatureTransformationCache(features)
      weighted_sums = []
      for column in self._feature_columns:
        with ops.name_scope(
            _sanitize_column_name_for_variable_scope(column.name)):
          # All the weights used in the linear model are owned by the state
          # manager associated with this Linear Model.
          weight_var = self._state_manager.get_variable(column, 'weights')

          weighted_sum = _create_weighted_sum(
              column=column,
              transformation_cache=transformation_cache,
              state_manager=self._state_manager,
              sparse_combiner=self._sparse_combiner,
              weight_var=weight_var)
          weighted_sums.append(weighted_sum)

      _verify_static_batch_size_equality(weighted_sums, self._feature_columns)
      predictions_no_bias = math_ops.add_n(
          weighted_sums, name='weighted_sum_no_bias')
      predictions = nn_ops.bias_add(
          predictions_no_bias, self.bias, name='weighted_sum')
      return predictions

  def get_config(self):
    """Returns the serializable config of this layer."""
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    column_configs = serialization.serialize_feature_columns(
        self._feature_columns)
    config = {
        'feature_columns': column_configs,
        'units': self._units,
        'sparse_combiner': self._sparse_combiner
    }

    base_config = super(  # pylint: disable=bad-super-call
        _LinearModelLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Recreates a layer instance from a config produced by `get_config`."""
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    config_cp = config.copy()
    columns = serialization.deserialize_feature_columns(
        config_cp['feature_columns'], custom_objects=custom_objects)
    del config_cp['feature_columns']
    return cls(feature_columns=columns, **config_cp)
# TODO(tanzheny): Cleanup it with respect to Premade model b/132690565.
class LinearModel(training.Model):
  """Produces a linear prediction `Tensor` based on given `feature_columns`.

  This layer generates a weighted sum based on output dimension `units`.
  Weighted sum refers to logits in classification problems. It refers to the
  prediction itself for linear regression problems.

  Note on supported columns: `LinearLayer` treats categorical columns as
  `indicator_column`s. To be specific, assume the input as `SparseTensor` looks
  like:

  ```python
  shape = [2, 2]
  {
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"
  }
  ```
  `linear_model` assigns weights for the presence of "a", "b", "c' implicitly,
  just like `indicator_column`, while `input_layer` explicitly requires wrapping
  each of categorical columns with an `embedding_column` or an
  `indicator_column`.

  Example of usage:

  ```python
  price = numeric_column('price')
  price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
  keywords = categorical_column_with_hash_bucket("keywords", 10K)
  keywords_price = crossed_column('keywords', price_buckets, ...)
  columns = [price_buckets, keywords, keywords_price ...]
  linear_model = LinearLayer(columns)
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  prediction = linear_model(features)
  ```
  """

  def __init__(self,
               feature_columns,
               units=1,
               sparse_combiner='sum',
               trainable=True,
               name=None,
               **kwargs):
    """Constructs a LinearModel.

    Args:
      feature_columns: An iterable containing the FeatureColumns to use as
        inputs to your model. All items should be instances of classes derived
        from `_FeatureColumn`s.
      units: An integer, dimensionality of the output space. Default value is 1.
      sparse_combiner: A string specifying how to reduce if a categorical column
        is multivalent. Except `numeric_column`, almost all columns passed to
        `linear_model` are considered as categorical columns.  It combines each
        categorical column independently. Currently "mean", "sqrtn" and "sum"
        are supported, with "sum" the default for linear model. "sqrtn" often
        achieves good accuracy, in particular with bag-of-words columns.
          * "sum": do not normalize features in the column
          * "mean": do l1 normalization on features in the column
          * "sqrtn": do l2 normalization on features in the column
        For example, for two features represented as the categorical columns:

        ```python
        # Feature 1
        shape = [2, 2]
        {
            [0, 0]: "a"
            [0, 1]: "b"
            [1, 0]: "c"
        }
        # Feature 2
        shape = [2, 3]
        {
            [0, 0]: "d"
            [1, 0]: "e"
            [1, 1]: "f"
            [1, 2]: "g"
        }
        ```
        with `sparse_combiner` as "mean", the linear model outputs conceptually
        are
        ```
        y_0 = 1.0 / 2.0 * ( w_a + w_ b) + w_c + b_0
        y_1 = w_d + 1.0 / 3.0 * ( w_e + w_ f + w_g) + b_1
        ```
        where `y_i` is the output, `b_i` is the bias, and `w_x` is the weight
        assigned to the presence of `x` in the input features.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: Name to give to the Linear Model. All variables and ops created will
        be scoped by this name.
      **kwargs: Keyword arguments to construct a layer.

    Raises:
      ValueError: if an item in `feature_columns` is neither a `DenseColumn`
        nor `CategoricalColumn`.
    """
    super(LinearModel, self).__init__(name=name, **kwargs)
    # All variable creation and the weighted-sum computation is delegated to
    # this single internal layer; `LinearModel` is a thin public wrapper.
    self.layer = _LinearModelLayer(
        feature_columns,
        units,
        sparse_combiner,
        trainable,
        name=self.name,
        **kwargs)

  def call(self, features):
    """Returns a `Tensor` that represents the predictions of a linear model.

    Args:
      features: A mapping from key to tensors. `_FeatureColumn`s look up via
        these keys. For example `numeric_column('price')` will look at 'price'
        key in this dict. Values are `Tensor` or `SparseTensor` depending on
        corresponding `_FeatureColumn`.

    Returns:
      A `Tensor` which represents predictions/logits of a linear model. Its
      shape is (batch_size, units) and its dtype is `float32`.

    Raises:
      ValueError: If features are not a dictionary.
    """
    return self.layer(features)

  @property
  def bias(self):
    # Convenience accessor; the variable itself is owned by the inner layer.
    return self.layer.bias
def _transform_features_v2(features, feature_columns, state_manager):
  """Applies each feature column's transformation to the raw input features.

  Most callers will not need this function directly; `input_layer` and
  `linear_model` cover the common use cases.

  Example:

  ```python
  # Define features and transformations
  crosses_a_x_b = crossed_column(
      columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
  price_buckets = bucketized_column(
      source_column=numeric_column("price"), boundaries=[...])
  columns = [crosses_a_x_b, price_buckets]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  transformed = transform_features(features=features, feature_columns=columns)
  assertCountEqual(columns, transformed.keys())
  ```

  Args:
    features: A mapping from key to tensors. `FeatureColumn`s look up via these
      keys. For example `numeric_column('price')` will look at 'price' key in
      this dict. Values can be a `SparseTensor` or a `Tensor` depends on
      corresponding `FeatureColumn`.
    feature_columns: An iterable containing all the `FeatureColumn`s.
    state_manager: A StateManager object that holds the FeatureColumn state.

  Returns:
    A `dict` mapping `FeatureColumn` to `Tensor` and `SparseTensor` values.
  """
  columns = _normalize_feature_columns(feature_columns)
  transformed = {}
  with ops.name_scope(
      None, default_name='transform_features', values=features.values()):
    # The cache memoizes intermediate transformations shared between columns.
    cache = FeatureTransformationCache(features)
    for column in columns:
      with ops.name_scope(
          None,
          default_name=_sanitize_column_name_for_variable_scope(column.name)):
        transformed[column] = cache.get(column, state_manager)
  return transformed
@tf_export('feature_column.make_parse_example_spec', v1=[])
def make_parse_example_spec_v2(feature_columns):
  """Creates a parsing spec dictionary from the given feature columns.

  The returned dictionary can be passed as the 'features' argument of
  `tf.io.parse_example`.

  Typical usage example:

  ```python
  # Define features and transformations
  feature_a = tf.feature_column.categorical_column_with_vocabulary_file(...)
  feature_b = tf.feature_column.numeric_column(...)
  feature_c_bucketized = tf.feature_column.bucketized_column(
      tf.feature_column.numeric_column("feature_c"), ...)
  feature_a_x_feature_c = tf.feature_column.crossed_column(
      columns=["feature_a", feature_c_bucketized], ...)

  feature_columns = set(
      [feature_b, feature_c_bucketized, feature_a_x_feature_c])
  features = tf.io.parse_example(
      serialized=serialized_examples,
      features=tf.feature_column.make_parse_example_spec(feature_columns))
  ```

  For the above example, make_parse_example_spec would return the dict:

  ```python
  {
      "feature_a": parsing_ops.VarLenFeature(tf.string),
      "feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
      "feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
  }
  ```

  Args:
    feature_columns: An iterable of feature columns; every item must be an
      instance of a class derived from `FeatureColumn`.

  Returns:
    A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
    value.

  Raises:
    ValueError: If any of the given `feature_columns` is not a `FeatureColumn`
      instance, or if two columns request conflicting parse specs for the
      same key.
  """
  parsing_spec = {}
  for column in feature_columns:
    if not isinstance(column, FeatureColumn):
      raise ValueError('All feature_columns must be FeatureColumn instances. '
                       'Given: {}'.format(column))
    column_spec = column.parse_example_spec
    # Columns may legitimately share a key (e.g. a bucketized column and its
    # source) as long as they request the identical parse spec for it.
    for key, value in six.iteritems(column_spec):
      if key in parsing_spec and value != parsing_spec[key]:
        raise ValueError(
            'feature_columns contain different parse_spec for key '
            '{}. Given {} and {}'.format(key, value, parsing_spec[key]))
    parsing_spec.update(column_spec)
  return parsing_spec
@tf_export('feature_column.embedding_column')
def embedding_column(categorical_column,
                     dimension,
                     combiner='mean',
                     initializer=None,
                     ckpt_to_load_from=None,
                     tensor_name_in_ckpt=None,
                     max_norm=None,
                     trainable=True,
                     use_safe_embedding_lookup=True):
  """`DenseColumn` that converts from sparse, categorical input.

  Use this column to feed sparse categorical data (as produced by any of the
  `categorical_column_*` functions) into a dense model such as a DNN: each
  category ID is mapped to a trainable embedding vector of size `dimension`.

  Example of using `embedding_column` with `DNNClassifier`:

  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=1000000, default_value=0)
  columns = [embedding_column(video_id, 9),...]

  estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)

  label_column = ...
  def input_fn():
    features = tf.io.parse_example(
        ..., features=make_parse_example_spec(columns + [label_column]))
    labels = features.pop(label_column.name)
    return features, labels

  estimator.train(input_fn=input_fn, steps=100)
  ```

  Example of using `embedding_column` with model_fn:

  ```python
  def model_fn(features, ...):
    video_id = categorical_column_with_identity(
        key='video_id', num_buckets=1000000, default_value=0)
    columns = [embedding_column(video_id, 9),...]
    dense_tensor = input_layer(features, columns)
    # Form DNN layers, calculate loss, and return EstimatorSpec.
    ...
  ```

  Args:
    categorical_column: A `CategoricalColumn` created by a
      `categorical_column_with_*` function; it supplies the sparse IDs fed to
      the embedding lookup.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: How to reduce multiple entries in a single row. Currently
      'mean' (the default), 'sqrtn' and 'sum' are supported. 'sqrtn' often
      achieves good accuracy, in particular with bag-of-words columns. Each of
      these can be thought of as an example-level normalization on the column.
      For more information, see `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function for the embedding variable.
      If not specified, defaults to `truncated_normal_initializer` with mean
      `0.0` and standard deviation `1/sqrt(dimension)`.
    ckpt_to_load_from: String representing checkpoint name/pattern from which to
      restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
      to restore the column weights. Required if `ckpt_to_load_from` is not
      `None`.
    max_norm: If not `None`, embedding values are l2-normalized to this value.
    trainable: Whether or not the embedding is trainable. Default is True.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse; this ensures there are no empty rows
      and all weights and ids are positive, at the expense of extra compute
      cost. Only applies to rank-2 (NxM) shaped input tensors. Defaults to
      true; consider turning off if the above checks are not needed. Note that
      having empty rows will not trigger any error, though the output result
      might be 0 or omitted.

  Returns:
    `DenseColumn` that converts from sparse input.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
      is specified.
    ValueError: if `initializer` is specified and is not callable.
    RuntimeError: If eager execution is enabled.
  """
  if dimension is None or dimension < 1:
    raise ValueError('Invalid dimension {}.'.format(dimension))
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')

  if initializer is None:
    # Default initializer scales inversely with the embedding dimension.
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1 / math.sqrt(dimension))
  elif not callable(initializer):
    raise ValueError('initializer must be callable if specified. '
                     'Embedding of column_name: {}'.format(
                         categorical_column.name))

  return EmbeddingColumn(
      categorical_column=categorical_column,
      dimension=dimension,
      combiner=combiner,
      initializer=initializer,
      ckpt_to_load_from=ckpt_to_load_from,
      tensor_name_in_ckpt=tensor_name_in_ckpt,
      max_norm=max_norm,
      trainable=trainable,
      use_safe_embedding_lookup=use_safe_embedding_lookup)
@tf_export(v1=['feature_column.shared_embedding_columns'])
def shared_embedding_columns(categorical_columns,
                             dimension,
                             combiner='mean',
                             initializer=None,
                             shared_embedding_collection_name=None,
                             ckpt_to_load_from=None,
                             tensor_name_in_ckpt=None,
                             max_norm=None,
                             trainable=True,
                             use_safe_embedding_lookup=True):
  """List of dense columns that convert from sparse, categorical input.

  This is similar to `embedding_column`, except that it produces a list of
  embedding columns that share the same embedding weights.

  Use this when your inputs are sparse and of the same type (e.g. watched and
  impression video IDs that share the same vocabulary), and you want to convert
  them to a dense representation (e.g., to feed to a DNN).

  Inputs must be a list of categorical columns created by any of the
  `categorical_column_*` function. They must all be of the same type and have
  the same arguments except `key`. E.g. they can be
  categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
  all columns could also be weighted_categorical_column.

  Here is an example embedding of two features for a DNNClassifier model:

  ```python
  watched_video_id = categorical_column_with_vocabulary_file(
      'watched_video_id', video_vocabulary_file, video_vocabulary_size)
  impression_video_id = categorical_column_with_vocabulary_file(
      'impression_video_id', video_vocabulary_file, video_vocabulary_size)
  columns = shared_embedding_columns(
      [watched_video_id, impression_video_id], dimension=10)

  estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)

  label_column = ...
  def input_fn():
    features = tf.io.parse_example(
        ..., features=make_parse_example_spec(columns + [label_column]))
    labels = features.pop(label_column.name)
    return features, labels

  estimator.train(input_fn=input_fn, steps=100)
  ```

  Here is an example using `shared_embedding_columns` with model_fn:

  ```python
  def model_fn(features, ...):
    watched_video_id = categorical_column_with_vocabulary_file(
        'watched_video_id', video_vocabulary_file, video_vocabulary_size)
    impression_video_id = categorical_column_with_vocabulary_file(
        'impression_video_id', video_vocabulary_file, video_vocabulary_size)
    columns = shared_embedding_columns(
        [watched_video_id, impression_video_id], dimension=10)
    dense_tensor = input_layer(features, columns)
    # Form DNN layers, calculate loss, and return EstimatorSpec.
    ...
  ```

  Args:
    categorical_columns: List of categorical columns created by a
      `categorical_column_with_*` function. These columns produce the sparse IDs
      that are inputs to the embedding lookup. All columns must be of the same
      type and have the same arguments except `key`. E.g. they can be
      categorical_column_with_vocabulary_file with the same vocabulary_file.
      Some or all columns could also be weighted_categorical_column.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries in
      a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
      'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column. For more information, see
      `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `truncated_normal_initializer` with mean `0.0` and
      standard deviation `1/sqrt(dimension)`.
    shared_embedding_collection_name: Optional name of the collection where
      shared embedding weights are added. If not given, a reasonable name will
      be chosen based on the names of `categorical_columns`. This is also used
      in `variable_scope` when creating shared embedding weights.
    ckpt_to_load_from: String representing checkpoint name/pattern from which to
      restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
      to restore the column weights. Required if `ckpt_to_load_from` is not
      `None`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
      than this value, before combining.
    trainable: Whether or not the embedding is trainable. Default is True.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
      there are no empty rows and all weights and ids are positive at the
      expense of extra compute cost. This only applies to rank 2 (NxM) shaped
      input tensors. Defaults to true, consider turning off if the above checks
      are not needed. Note that having empty rows will not trigger any error
      though the output result might be 0 or omitted.

  Returns:
    A list of dense columns that converts from sparse input. The order of
    results follows the ordering of `categorical_columns`.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if any of the given `categorical_columns` is of different type
      or has different arguments than the others.
    ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
      is specified.
    ValueError: if `initializer` is specified and is not callable.
    RuntimeError: if eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('shared_embedding_columns are not supported when eager '
                       'execution is enabled.')

  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')

  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified.')
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1. / math.sqrt(dimension))

  # Sort the columns so the default collection name is deterministic even if the
  # user passes columns from an unsorted collection, such as dict.values().
  sorted_columns = sorted(categorical_columns, key=lambda x: x.name)

  c0 = sorted_columns[0]
  num_buckets = c0._num_buckets  # pylint: disable=protected-access
  if not isinstance(c0, fc_old._CategoricalColumn):  # pylint: disable=protected-access
    raise ValueError(
        'All categorical_columns must be subclasses of _CategoricalColumn. '
        'Given: {}, of type: {}'.format(c0, type(c0)))
  # Unwrap weighted/sequence wrappers so that the *underlying* column types
  # are compared below; wrappers themselves are allowed to differ.
  while isinstance(
      c0, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn,  # pylint: disable=protected-access
           fc_old._SequenceCategoricalColumn, SequenceCategoricalColumn)):  # pylint: disable=protected-access
    c0 = c0.categorical_column
  # Every remaining column must match the first column's underlying type and
  # bucket count, since they will all share one embedding table.
  for c in sorted_columns[1:]:
    while isinstance(
        c, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn,  # pylint: disable=protected-access
            fc_old._SequenceCategoricalColumn, SequenceCategoricalColumn)):  # pylint: disable=protected-access
      c = c.categorical_column
    if not isinstance(c, type(c0)):
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same type, or be weighted_categorical_column or sequence column '
          'of the same type. Given column: {} of type: {} does not match given '
          'column: {} of type: {}'.format(c0, type(c0), c, type(c)))
    if num_buckets != c._num_buckets:  # pylint: disable=protected-access
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same number of buckets. Given column: {} with buckets: {} does '
          'not match column: {} with buckets: {}'.format(
              c0, num_buckets, c, c._num_buckets))  # pylint: disable=protected-access

  if not shared_embedding_collection_name:
    # Derive a deterministic default name from the sorted column names.
    shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
    shared_embedding_collection_name += '_shared_embedding'

  result = []
  for column in categorical_columns:
    result.append(
        fc_old._SharedEmbeddingColumn(  # pylint: disable=protected-access
            categorical_column=column,
            initializer=initializer,
            dimension=dimension,
            combiner=combiner,
            shared_embedding_collection_name=shared_embedding_collection_name,
            ckpt_to_load_from=ckpt_to_load_from,
            tensor_name_in_ckpt=tensor_name_in_ckpt,
            max_norm=max_norm,
            trainable=trainable,
            use_safe_embedding_lookup=use_safe_embedding_lookup))
  return result
@tf_export('feature_column.shared_embeddings', v1=[])
def shared_embedding_columns_v2(categorical_columns,
                                dimension,
                                combiner='mean',
                                initializer=None,
                                shared_embedding_collection_name=None,
                                ckpt_to_load_from=None,
                                tensor_name_in_ckpt=None,
                                max_norm=None,
                                trainable=True,
                                use_safe_embedding_lookup=True):
  """List of dense columns that convert from sparse, categorical input.

  This is similar to `embedding_column`, except that it produces a list of
  embedding columns that share the same embedding weights.

  Use this when your inputs are sparse and of the same type (e.g. watched and
  impression video IDs that share the same vocabulary), and you want to convert
  them to a dense representation (e.g., to feed to a DNN).

  Inputs must be a list of categorical columns created by any of the
  `categorical_column_*` function. They must all be of the same type and have
  the same arguments except `key`. E.g. they can be
  categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
  all columns could also be weighted_categorical_column.

  Here is an example embedding of two features for a DNNClassifier model:

  ```python
  watched_video_id = categorical_column_with_vocabulary_file(
      'watched_video_id', video_vocabulary_file, video_vocabulary_size)
  impression_video_id = categorical_column_with_vocabulary_file(
      'impression_video_id', video_vocabulary_file, video_vocabulary_size)
  columns = shared_embedding_columns(
      [watched_video_id, impression_video_id], dimension=10)

  estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)

  label_column = ...
  def input_fn():
    features = tf.io.parse_example(
        ..., features=make_parse_example_spec(columns + [label_column]))
    labels = features.pop(label_column.name)
    return features, labels

  estimator.train(input_fn=input_fn, steps=100)
  ```

  Here is an example using `shared_embedding_columns` with model_fn:

  ```python
  def model_fn(features, ...):
    watched_video_id = categorical_column_with_vocabulary_file(
        'watched_video_id', video_vocabulary_file, video_vocabulary_size)
    impression_video_id = categorical_column_with_vocabulary_file(
        'impression_video_id', video_vocabulary_file, video_vocabulary_size)
    columns = shared_embedding_columns(
        [watched_video_id, impression_video_id], dimension=10)
    dense_tensor = input_layer(features, columns)
    # Form DNN layers, calculate loss, and return EstimatorSpec.
    ...
  ```

  Args:
    categorical_columns: List of categorical columns created by a
      `categorical_column_with_*` function. These columns produce the sparse IDs
      that are inputs to the embedding lookup. All columns must be of the same
      type and have the same arguments except `key`. E.g. they can be
      categorical_column_with_vocabulary_file with the same vocabulary_file.
      Some or all columns could also be weighted_categorical_column.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
      'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column. For more information, see
      `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `truncated_normal_initializer` with mean `0.0` and standard
      deviation `1/sqrt(dimension)`.
    shared_embedding_collection_name: Optional collective name of these columns.
      If not given, a reasonable name will be chosen based on the names of
      `categorical_columns`.
    ckpt_to_load_from: String representing checkpoint name/pattern from which to
      restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
      which to restore the column weights. Required if `ckpt_to_load_from` is
      not `None`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is
      larger than this value, before combining.
    trainable: Whether or not the embedding is trainable. Default is True.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
      there are no empty rows and all weights and ids are positive at the
      expense of extra compute cost. This only applies to rank 2 (NxM) shaped
      input tensors. Defaults to true, consider turning off if the above checks
      are not needed. Note that having empty rows will not trigger any error
      though the output result might be 0 or omitted.

  Returns:
    A list of dense columns that converts from sparse input. The order of
    results follows the ordering of `categorical_columns`.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if any of the given `categorical_columns` is of different type
      or has different arguments than the others.
    ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
      is specified.
    ValueError: if `initializer` is specified and is not callable.
    RuntimeError: if eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('shared_embedding_columns are not supported when eager '
                       'execution is enabled.')

  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')

  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified.')
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1. / math.sqrt(dimension))

  # Sort the columns so the default collection name is deterministic even if the
  # user passes columns from an unsorted collection, such as dict.values().
  sorted_columns = sorted(categorical_columns, key=lambda x: x.name)

  c0 = sorted_columns[0]
  num_buckets = c0.num_buckets
  if not isinstance(c0, CategoricalColumn):
    raise ValueError(
        'All categorical_columns must be subclasses of CategoricalColumn. '
        'Given: {}, of type: {}'.format(c0, type(c0)))
  # Unwrap weighted/sequence wrappers so that the *underlying* column types
  # are compared below; wrappers themselves are allowed to differ.
  while isinstance(c0, (WeightedCategoricalColumn, SequenceCategoricalColumn)):
    c0 = c0.categorical_column
  # Every remaining column must match the first column's underlying type and
  # bucket count, since they will all share one embedding table.
  for c in sorted_columns[1:]:
    while isinstance(c, (WeightedCategoricalColumn, SequenceCategoricalColumn)):
      c = c.categorical_column
    if not isinstance(c, type(c0)):
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same type, or be weighted_categorical_column or sequence column '
          'of the same type. Given column: {} of type: {} does not match given '
          'column: {} of type: {}'.format(c0, type(c0), c, type(c)))
    if num_buckets != c.num_buckets:
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same number of buckets. Given column: {} with buckets: {} does '
          'not match column: {} with buckets: {}'.format(
              c0, num_buckets, c, c.num_buckets))

  if not shared_embedding_collection_name:
    # Derive a deterministic default name from the sorted column names.
    shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
    shared_embedding_collection_name += '_shared_embedding'

  # A single creator holds the shared embedding table; each call produces a
  # column that performs its lookup against that shared table.
  column_creator = SharedEmbeddingColumnCreator(
      dimension, initializer, ckpt_to_load_from, tensor_name_in_ckpt,
      num_buckets, trainable, shared_embedding_collection_name,
      use_safe_embedding_lookup)

  result = []
  for column in categorical_columns:
    result.append(
        column_creator(
            categorical_column=column, combiner=combiner, max_norm=max_norm))

  return result
@tf_export('feature_column.numeric_column')
def numeric_column(key,
                   shape=(1,),
                   default_value=None,
                   dtype=dtypes.float32,
                   normalizer_fn=None):
  """Represents real valued or numerical features.

  Example:

  ```python
  price = numeric_column('price')
  columns = [price, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)

  # or
  bucketized_price = bucketized_column(price, boundaries=[...])
  columns = [bucketized_price, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    shape: An iterable of integers specifies the shape of the `Tensor`. An
      integer can be given which means a single dimension `Tensor` with given
      width. The `Tensor` representing the column will have the shape of
      [batch_size] + `shape`.
    default_value: A single value compatible with `dtype` or an iterable of
      values compatible with `dtype` which the column takes on during
      `tf.Example` parsing if data is missing. A default value of `None` will
      cause `tf.io.parse_example` to fail if an example does not contain this
      column. If a single value is provided, the same value will be applied as
      the default value for every item. If an iterable of values is provided,
      the shape of the `default_value` should be equal to the given `shape`.
    dtype: defines the type of values. Default value is `tf.float32`. Must be a
      non-quantized, real integer or floating point type.
    normalizer_fn: If not `None`, a function that can be used to normalize the
      value of the tensor after `default_value` is applied for parsing.
      Normalizer function takes the input `Tensor` as its argument, and returns
      the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that
      even though the most common use case of this function is normalization, it
      can be used for any kind of Tensorflow transformations.

  Returns:
    A `NumericColumn`.

  Raises:
    TypeError: if any dimension in shape is not an int
    ValueError: if any dimension in shape is not a positive integer
    TypeError: if `default_value` is an iterable but not compatible with `shape`
    TypeError: if `default_value` is not compatible with `dtype`.
    ValueError: if `dtype` is not convertible to `tf.float32`.
  """
  # Validate `key` up front: every check below interpolates `key` into its
  # error message, so a non-string key should fail here with a clear error
  # rather than surfacing inside an unrelated message later.
  fc_utils.assert_key_is_string(key)
  shape = _check_shape(shape, key)
  if not (dtype.is_integer or dtype.is_floating):
    raise ValueError('dtype must be convertible to float. '
                     'dtype: {}, key: {}'.format(dtype, key))
  default_value = fc_utils.check_default_value(
      shape, default_value, dtype, key)
  if normalizer_fn is not None and not callable(normalizer_fn):
    raise TypeError(
        'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
  return NumericColumn(
      key,
      shape=shape,
      default_value=default_value,
      dtype=dtype,
      normalizer_fn=normalizer_fn)
@tf_export('feature_column.bucketized_column')
def bucketized_column(source_column, boundaries):
  """Represents discretized dense input bucketed by `boundaries`.

  Each bucket includes its left boundary and excludes its right one, so
  `boundaries=[0., 1., 2.]` produces the buckets `(-inf, 0.)`, `[0., 1.)`,
  `[1., 2.)`, and `[2., +inf)`.

  For example, given `boundaries = [0, 10, 100]` and the input tensor
  `[[-5, 10000], [150, 10], [5, 100]]`, the output is
  `[[0, 3], [3, 2], [1, 3]]`.

  Example:

  ```python
  price = tf.feature_column.numeric_column('price')
  bucketized_price = tf.feature_column.bucketized_column(
      price, boundaries=[...])
  columns = [bucketized_price, ...]
  features = tf.io.parse_example(
      ..., features=tf.feature_column.make_parse_example_spec(columns))
  dense_tensor = tf.keras.layers.DenseFeatures(columns)(features)
  ```

  A `bucketized_column` can also be crossed with another categorical column
  via `crossed_column`, since bucketization turns a numerical feature into a
  categorical one.

  Args:
    source_column: A one-dimensional dense column which is generated with
      `numeric_column`.
    boundaries: A sorted list or tuple of floats specifying the boundaries.

  Returns:
    A `BucketizedColumn`.

  Raises:
    ValueError: If `source_column` is not a numeric column, or if it is not
      one-dimensional.
    ValueError: If `boundaries` is not a sorted list or tuple.
  """
  if not isinstance(source_column, (NumericColumn, fc_old._NumericColumn)):  # pylint: disable=protected-access
    raise ValueError(
        'source_column must be a column generated with numeric_column(). '
        'Given: {}'.format(source_column))
  if len(source_column.shape) > 1:
    raise ValueError(
        'source_column must be one-dimensional column. '
        'Given: {}'.format(source_column))
  if not boundaries:
    raise ValueError('boundaries must not be empty.')
  if not isinstance(boundaries, (list, tuple)):
    raise ValueError('boundaries must be a sorted list.')
  # Strictly-increasing check over adjacent pairs.
  if any(lower >= upper for lower, upper in zip(boundaries, boundaries[1:])):
    raise ValueError('boundaries must be a sorted list.')
  return BucketizedColumn(source_column, tuple(boundaries))
@tf_export('feature_column.categorical_column_with_hash_bucket')
def categorical_column_with_hash_bucket(key,
                                        hash_bucket_size,
                                        dtype=dtypes.string):
  """Represents sparse feature where ids are set by hashing.

  Use this when your sparse features are in string or integer format, and you
  want to distribute your inputs into a finite number of buckets by hashing:
  `output_id = Hash(input_feature_string) % bucket_size` for string input;
  integer input is converted to its string representation first and hashed by
  the same formula.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string, which will be dropped by this feature column.

  Example:

  ```python
  keywords = categorical_column_with_hash_bucket("keywords", 10K)
  columns = [keywords, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)

  # or, for a DNN:
  keywords_embedded = embedding_column(keywords, 16)
  columns = [keywords_embedded, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    hash_bucket_size: An int > 1. The number of buckets.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A `HashedCategoricalColumn`.

  Raises:
    ValueError: `hash_bucket_size` is not greater than 1.
    ValueError: `dtype` is neither string nor integer.
  """
  if hash_bucket_size is None:
    raise ValueError('hash_bucket_size must be set. key: {}'.format(key))
  if hash_bucket_size < 1:
    raise ValueError(
        'hash_bucket_size must be at least 1. '
        'hash_bucket_size: {}, key: {}'.format(hash_bucket_size, key))
  fc_utils.assert_key_is_string(key)
  fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  return HashedCategoricalColumn(key, hash_bucket_size, dtype)
@tf_export(v1=['feature_column.categorical_column_with_vocabulary_file'])
def categorical_column_with_vocabulary_file(key,
                                            vocabulary_file,
                                            vocabulary_size=None,
                                            num_oov_buckets=0,
                                            default_value=None,
                                            dtype=dtypes.string):
  """A `CategoricalColumn` with a vocabulary file.

  Use this when your inputs are in string or integer format, and you have a
  vocabulary file that maps each value to an integer ID. By default,
  out-of-vocabulary values are ignored. Use either (but not both) of
  `num_oov_buckets` and `default_value` to specify how to include
  out-of-vocabulary values.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string, which will be dropped by this feature column.

  Example with `num_oov_buckets` (file '/us/states.txt' has 50 two-character
  state abbreviations; in-file values get IDs 0-49 by line number, all other
  values are hashed into IDs 50-54):

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
      num_oov_buckets=5)
  columns = [states, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Example with `default_value` (file has 51 lines, the first being 'XX';
  both a literal 'XX' and any value missing from the file get ID 0, all
  others get their line number 1-50):

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
      default_value=0)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than length of `vocabulary_file`, if less than length, later
      values are ignored. If None, it is set to the length of `vocabulary_file`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
      the input value. A positive `num_oov_buckets` can not be specified with
      `default_value`.
    default_value: The integer ID value to return for out-of-vocabulary feature
      values, defaults to `-1`. This can not be specified with a positive
      `num_oov_buckets`.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A `CategoricalColumn` with a vocabulary file.

  Raises:
    ValueError: `vocabulary_file` is missing or cannot be opened.
    ValueError: `vocabulary_size` is missing or < 1.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: `dtype` is neither string nor integer.
  """
  # Thin v1 shim: all validation and construction is delegated to the v2
  # implementation; only the parameter order differs between the two APIs.
  return categorical_column_with_vocabulary_file_v2(
      key=key,
      vocabulary_file=vocabulary_file,
      vocabulary_size=vocabulary_size,
      dtype=dtype,
      default_value=default_value,
      num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_vocabulary_file', v1=[])
def categorical_column_with_vocabulary_file_v2(key,
                                               vocabulary_file,
                                               vocabulary_size=None,
                                               dtype=dtypes.string,
                                               default_value=None,
                                               num_oov_buckets=0):
  """A `CategoricalColumn` with a vocabulary file.

  Use this when your inputs are in string or integer format, and you have a
  vocabulary file that maps each value to an integer ID. By default,
  out-of-vocabulary values are ignored. Use either (but not both) of
  `num_oov_buckets` and `default_value` to specify how to include
  out-of-vocabulary values.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string, which will be dropped by this feature column.

  Example with `num_oov_buckets` (file `'/us/states.txt'` has 50 two-character
  state abbreviations; in-file values get IDs 0-49 by line number, all other
  values are hashed into IDs 50-54):

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
      num_oov_buckets=5)
  columns = [states, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Example with `default_value` (file has 51 lines, the first being `'XX'`;
  both a literal `'XX'` and any value missing from the file get ID 0, all
  others get their line number 1-50):

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
      default_value=0)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than length of `vocabulary_file`, if less than length, later
      values are ignored. If None, it is set to the length of `vocabulary_file`.
    dtype: The type of features. Only string and integer types are supported.
    default_value: The integer ID value to return for out-of-vocabulary feature
      values, defaults to `-1`. This can not be specified with a positive
      `num_oov_buckets`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
      the input value. A positive `num_oov_buckets` can not be specified with
      `default_value`.

  Returns:
    A `CategoricalColumn` with a vocabulary file.

  Raises:
    ValueError: `vocabulary_file` is missing or cannot be opened.
    ValueError: `vocabulary_size` is missing or < 1.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: `dtype` is neither string nor integer.
  """
  if not vocabulary_file:
    raise ValueError('Missing vocabulary_file in {}.'.format(key))

  if vocabulary_size is None:
    # Infer the vocabulary size by counting lines in the file.
    if not gfile.Exists(vocabulary_file):
      raise ValueError('vocabulary_file in {} does not exist.'.format(key))
    with gfile.GFile(vocabulary_file, mode='rb') as f:
      vocabulary_size = sum(1 for _ in f)
    logging.info(
        'vocabulary_size = %d in %s is inferred from the number of elements '
        'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)

  # `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
  if vocabulary_size < 1:
    raise ValueError('Invalid vocabulary_size in {}.'.format(key))
  if num_oov_buckets:
    if default_value is not None:
      raise ValueError(
          "Can't specify both num_oov_buckets and default_value in {}.".format(
              key))
    if num_oov_buckets < 0:
      raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
          num_oov_buckets, key))
  fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  fc_utils.assert_key_is_string(key)
  # Normalize the optional arguments only after the mutual-exclusion checks
  # above, so an explicitly passed default_value still conflicts with
  # num_oov_buckets.
  if num_oov_buckets is None:
    num_oov_buckets = 0
  if default_value is None:
    default_value = -1
  return VocabularyFileCategoricalColumn(
      key=key,
      vocabulary_file=vocabulary_file,
      vocabulary_size=vocabulary_size,
      num_oov_buckets=num_oov_buckets,
      default_value=default_value,
      dtype=dtype)
@tf_export('feature_column.categorical_column_with_vocabulary_list')
def categorical_column_with_vocabulary_list(key,
                                            vocabulary_list,
                                            dtype=None,
                                            default_value=-1,
                                            num_oov_buckets=0):
  """A `CategoricalColumn` with in-memory vocabulary.

  Use this when your inputs are in string or integer format, and you have an
  in-memory vocabulary mapping each value to an integer ID. By default,
  out-of-vocabulary values are ignored. Use either (but not both) of
  `num_oov_buckets` and `default_value` to specify how to include
  out-of-vocabulary values.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string, which will be dropped by this feature column.

  Example with `num_oov_buckets` (each in-vocabulary input gets the ID of its
  index, e.g. 'B' -> 2; everything else is hashed into IDs 4-5):

  ```python
  colors = categorical_column_with_vocabulary_list(
      key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
      num_oov_buckets=2)
  columns = [colors, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  Example with `default_value` (each in-vocabulary input gets the ID of its
  index, e.g. 'B' -> 3; everything else gets `default_value` 0):

  ```python
  colors = categorical_column_with_vocabulary_list(
      key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the column
      name and the dictionary key for feature parsing configs, feature `Tensor`
      objects, and feature columns.
    vocabulary_list: An ordered iterable defining the vocabulary. Each feature
      is mapped to the index of its value (if present) in `vocabulary_list`.
      Must be castable to `dtype`.
    dtype: The type of features. Only string and integer types are supported. If
      `None`, it will be inferred from `vocabulary_list`.
    default_value: The integer ID value to return for out-of-vocabulary feature
      values, defaults to `-1`. This can not be specified with a positive
      `num_oov_buckets`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
      hash of the input value. A positive `num_oov_buckets` can not be specified
      with `default_value`.

  Returns:
    A `CategoricalColumn` with in-memory vocabulary.

  Raises:
    ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: if `dtype` is not integer or string.
  """
  if vocabulary_list is None or len(vocabulary_list) < 1:
    raise ValueError(
        'vocabulary_list {} must be non-empty, column_name: {}'.format(
            vocabulary_list, key))
  unique_entries = set(vocabulary_list)
  if len(unique_entries) != len(vocabulary_list):
    raise ValueError(
        'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
            vocabulary_list, key))
  # Infer the vocabulary's dtype via numpy so mixed int/float lists resolve
  # the same way numpy would resolve them.
  vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
  if num_oov_buckets:
    if default_value != -1:
      raise ValueError(
          "Can't specify both num_oov_buckets and default_value in {}.".format(
              key))
    if num_oov_buckets < 0:
      raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
          num_oov_buckets, key))
  fc_utils.assert_string_or_int(
      vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
  if dtype is None:
    dtype = vocabulary_dtype
  elif dtype.is_integer != vocabulary_dtype.is_integer:
    raise ValueError(
        'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
            dtype, vocabulary_dtype, key))
  fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  fc_utils.assert_key_is_string(key)

  return VocabularyListCategoricalColumn(
      key=key,
      vocabulary_list=tuple(vocabulary_list),
      dtype=dtype,
      default_value=default_value,
      num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_identity')
def categorical_column_with_identity(key, num_buckets, default_value=None):
  """A `CategoricalColumn` that returns identity values.

  Use this when your inputs are integers in the range `[0, num_buckets)`, and
  you want to use the input value itself as the categorical ID. Values outside
  this range will result in `default_value` if specified, otherwise it will
  fail.

  Typically, this is used for contiguous ranges of integer indexes, but
  it doesn't have to be. This might be inefficient, however, if many of IDs
  are unused. Consider `categorical_column_with_hash_bucket` in that case.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string, which will be dropped by this feature column.

  Linear model example (inputs in `[0, 1000000)` map to themselves, everything
  else maps to `default_value` 0 — note a literal 0 input yields the same ID):

  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=1000000, default_value=0)
  columns = [video_id, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  Embedding for a DNN model:

  ```python
  columns = [embedding_column(video_id, 9),...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
    default_value: If set, values outside of range `[0, num_buckets)` will
      be replaced with this value. If not set, values >= num_buckets will
      cause a failure while values < 0 will be dropped.

  Returns:
    A `CategoricalColumn` that returns identity values.

  Raises:
    ValueError: if `num_buckets` is less than one.
    ValueError: if `default_value` is not in range `[0, num_buckets)`.
  """
  if num_buckets < 1:
    raise ValueError(
        'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
  if default_value is not None and not 0 <= default_value < num_buckets:
    raise ValueError(
        'default_value {} not in range [0, {}), column_name {}'.format(
            default_value, num_buckets, key))
  fc_utils.assert_key_is_string(key)
  return IdentityCategoricalColumn(
      key=key, number_buckets=num_buckets, default_value=default_value)
@tf_export('feature_column.indicator_column')
def indicator_column(categorical_column):
  """Represents multi-hot representation of given categorical column.

  - For DNN model, `indicator_column` can be used to wrap any
    `categorical_column_*` (e.g., to feed to DNN). Consider to Use
    `embedding_column` if the number of buckets/unique(values) are large.

  - For Wide (aka linear) model, `indicator_column` is the internal
    representation for categorical column when passing categorical column
    directly (as any element in feature_columns) to `linear_model`. See
    `linear_model` for details.

  ```python
  name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda'])
  columns = [name, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)

  dense_tensor == [[1, 0, 0]]  # If "name" bytes_list is ["bob"]
  dense_tensor == [[1, 0, 1]]  # If "name" bytes_list is ["bob", "wanda"]
  dense_tensor == [[2, 0, 0]]  # If "name" bytes_list is ["bob", "bob"]
  ```

  Args:
    categorical_column: A `CategoricalColumn` which is created by
      `categorical_column_with_*` or `crossed_column` functions.

  Returns:
    An `IndicatorColumn`.

  Raises:
    ValueError: If `categorical_column` is not CategoricalColumn type.
  """
  # Both the v2 base class and the legacy v1 base class are accepted.
  accepted_types = (CategoricalColumn, fc_old._CategoricalColumn)  # pylint: disable=protected-access
  if not isinstance(categorical_column, accepted_types):
    raise ValueError(
        'Unsupported input type. Input must be a CategoricalColumn. '
        'Given: {}'.format(categorical_column))
  return IndicatorColumn(categorical_column)
@tf_export('feature_column.weighted_categorical_column')
def weighted_categorical_column(categorical_column,
                                weight_feature_key,
                                dtype=dtypes.float32):
  """Applies weight values to a `CategoricalColumn`.

  Use this when each of your sparse inputs has both an ID and a value. For
  example, if you're representing text documents as a collection of word
  frequencies, you can provide 2 parallel sparse input features ('terms' and
  'frequencies' below).

  Example input `tf.Example` protos carry a bytes feature 'terms' and a
  parallel float feature 'frequencies' with one weight per term:

  ```proto
  features {
    feature { key: "terms" value {bytes_list {value: "very" value: "model"}} }
    feature { key: "frequencies" value {float_list {value: 0.3 value: 0.1}} }
  }
  ```

  ```python
  categorical_column = categorical_column_with_hash_bucket(
      column_name='terms', hash_bucket_size=1000)
  weighted_column = weighted_categorical_column(
      categorical_column=categorical_column, weight_feature_key='frequencies')
  columns = [weighted_column, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  This assumes the input dictionary contains a `SparseTensor` for key
  'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must
  have the same indices and dense shape.

  Args:
    categorical_column: A `CategoricalColumn` created by
      `categorical_column_with_*` functions.
    weight_feature_key: String key for weight values.
    dtype: Type of weights, such as `tf.float32`. Only float and integer
      weights are supported.

  Returns:
    A `CategoricalColumn` composed of two sparse features: one represents id,
    the other represents weight (value) of the id feature in that example.

  Raises:
    ValueError: if `dtype` is not convertible to float.
  """
  is_numeric_dtype = dtype is not None and (dtype.is_integer or
                                            dtype.is_floating)
  if not is_numeric_dtype:
    raise ValueError('dtype {} is not convertible to float.'.format(dtype))
  return WeightedCategoricalColumn(
      categorical_column=categorical_column,
      weight_feature_key=weight_feature_key,
      dtype=dtype)
@tf_export('feature_column.crossed_column')
def crossed_column(keys, hash_bucket_size, hash_key=None):
  """Returns a column for performing crosses of categorical features.

  Crossed features are hashed according to `hash_bucket_size`; conceptually
  the transformation is `Hash(cartesian product of features) %
  hash_bucket_size`.

  For example, crossing a first SparseTensor holding `"a"`, `"b"`, `"c"` with
  a second holding `"d"`, `"e"` produces entries such as
  `Hash64("d", Hash64("a")) % hash_bucket_size` at the corresponding indices.

  Linear model with crosses of string features:

  ```python
  keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
  columns = [keywords_x_doc_terms, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  You could also use vocabulary lookup before crossing:

  ```python
  keywords = categorical_column_with_vocabulary_file(
      'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
  keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
  ```

  If an input feature is of numeric type, wrap it first with
  `categorical_column_with_identity` or `bucketized_column`:

  ```python
  vertical_id = categorical_column_with_identity('vertical_id', 10K)
  price = numeric_column('price')
  bucketized_price = bucketized_column(price, boundaries=[...])
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
  ```

  To use a crossed column in a DNN model, wrap it in an embedding column:

  ```python
  vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
  dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
  ```

  Args:
    keys: An iterable identifying the features to be crossed. Each element can
      be either:
      * string: Will use the corresponding feature which must be of string
        type.
      * `CategoricalColumn`: Will use the transformed tensor produced by this
        column. Does not support hashed categorical column.
    hash_bucket_size: An int > 1. The number of buckets.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseCrossOp (optional).

  Returns:
    A `CrossedColumn`.

  Raises:
    ValueError: If `len(keys) < 2`.
    ValueError: If any of the keys is neither a string nor `CategoricalColumn`.
    ValueError: If any of the keys is `HashedCategoricalColumn`.
    ValueError: If `hash_bucket_size < 1`.
  """
  if not hash_bucket_size or hash_bucket_size < 1:
    raise ValueError('hash_bucket_size must be > 1. '
                     'hash_bucket_size: {}'.format(hash_bucket_size))
  if not keys or len(keys) < 2:
    raise ValueError(
        'keys must be a list with length > 1. Given: {}'.format(keys))
  # Keys may be plain feature names or categorical columns (v2 or legacy v1),
  # with the exception of hashed categorical columns (hashing twice raises the
  # collision probability).
  categorical_types = (CategoricalColumn, fc_old._CategoricalColumn)  # pylint: disable=protected-access
  hashed_types = (HashedCategoricalColumn, fc_old._HashedCategoricalColumn)  # pylint: disable=protected-access
  for key in keys:
    if (not isinstance(key, six.string_types) and
        not isinstance(key, categorical_types)):
      raise ValueError(
          'Unsupported key type. All keys must be either string, or '
          'categorical column except HashedCategoricalColumn. '
          'Given: {}'.format(key))
    if isinstance(key, hashed_types):
      raise ValueError(
          'categorical_column_with_hash_bucket is not supported for crossing. '
          'Hashing before crossing will increase probability of collision. '
          'Instead, use the feature name as a string. Given: {}'.format(key))
  return CrossedColumn(
      keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)
@six.add_metaclass(abc.ABCMeta)
class FeatureColumn(object):
  """Represents a feature column abstraction.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  To distinguish between the concept of a feature family and a specific binary
  feature within a family, we refer to a feature family like "country" as a
  feature column. For example, we can have a feature in a `tf.Example` format:
    {key: "country", value: [ "US" ]}
  In this example the value of feature is "US" and "country" refers to the
  column of the feature.

  This class is an abstract class. Users should not create instances of this.
  """

  @abc.abstractproperty
  def name(self):
    """Returns string. Used for naming."""
    pass

  def __lt__(self, other):
    """Allows feature columns to be sorted in Python 3 as they are in Python 2.

    Feature columns need to occasionally be sortable, for example when used as
    keys in a features dictionary passed to a layer.

    In CPython, `__lt__` must be defined for all objects in the
    sequence being sorted.

    If any objects in the sequence being sorted do not have an `__lt__` method
    compatible with feature column objects (such as strings), then CPython will
    fall back to using the `__gt__` method below.
    https://docs.python.org/3/library/stdtypes.html#list.sort

    Args:
      other: The other object to compare to.

    Returns:
      True if the string representation of this object is lexicographically less
      than the string representation of `other`. For FeatureColumn objects,
      this looks like "<__main__.FeatureColumn object at 0xa>".
    """
    # Compare by string representation so heterogeneous sequences (columns
    # mixed with e.g. strings) still sort deterministically.
    return str(self) < str(other)

  def __gt__(self, other):
    """Allows feature columns to be sorted in Python 3 as they are in Python 2.

    Feature columns need to occasionally be sortable, for example when used as
    keys in a features dictionary passed to a layer.

    `__gt__` is called when the "other" object being compared during the sort
    does not have `__lt__` defined.
    Example: http://gpaste/4803354716798976

    Args:
      other: The other object to compare to.

    Returns:
      True if the string representation of this object is lexicographically
      greater than the string representation of `other`. For FeatureColumn
      objects, this looks like "<__main__.FeatureColumn object at 0xa>".
    """
    return str(self) > str(other)

  @abc.abstractmethod
  def transform_feature(self, transformation_cache, state_manager):
    """Returns intermediate representation (usually a `Tensor`).

    Uses `transformation_cache` to create an intermediate representation
    (usually a `Tensor`) that other feature columns can use.

    Example usage of `transformation_cache`:
    Let's say a Feature column depends on raw feature ('raw') and another
    `FeatureColumn` (input_fc). To access corresponding `Tensor`s,
    transformation_cache will be used as follows:

    ```python
    raw_tensor = transformation_cache.get('raw', state_manager)
    fc_tensor = transformation_cache.get(input_fc, state_manager)
    ```

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      Transformed feature `Tensor`.
    """
    pass

  @abc.abstractproperty
  def parse_example_spec(self):
    """Returns a `tf.Example` parsing spec as dict.

    It is used for get_parsing_spec for `tf.io.parse_example`. Returned spec is
    a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
    supported objects. Please check documentation of `tf.io.parse_example` for
    all supported spec objects.

    Let's say a Feature column depends on raw feature ('raw') and another
    `FeatureColumn` (input_fc). One possible implementation of
    parse_example_spec is as follows:

    ```python
    spec = {'raw': tf.io.FixedLenFeature(...)}
    spec.update(input_fc.parse_example_spec)
    return spec
    ```
    """
    pass

  def create_state(self, state_manager):
    """Uses the `state_manager` to create state for the FeatureColumn.

    Default implementation is a no-op; stateful columns (e.g. embedding
    columns, which create lookup variables) override this.

    Args:
      state_manager: A `StateManager` to create / access resources such as
        lookup tables and variables.
    """
    pass

  @abc.abstractproperty
  def _is_v2_column(self):
    """Returns whether this FeatureColumn is fully conformant to the new API.

    This is needed for composition type cases where an EmbeddingColumn etc.
    might take in old categorical columns as input and then we want to use the
    old API.
    """
    pass

  @abc.abstractproperty
  def parents(self):
    """Returns a list of immediate raw feature and FeatureColumn dependencies.

    For example:

    # For the following feature columns
    a = numeric_column('f1')
    c = crossed_column(a, 'f2')
    # The expected parents are:
    a.parents = ['f1']
    c.parents = [a, 'f2']
    """
    pass

  def get_config(self):
    """Returns the config of the feature column.

    A FeatureColumn config is a Python dictionary (serializable) containing the
    configuration of a FeatureColumn. The same FeatureColumn can be
    reinstantiated later from this configuration.

    The config of a feature column does not include information about feature
    columns depending on it nor the FeatureColumn class name.

    Example with (de)serialization practices followed in this file:

    ```python
    class SerializationExampleFeatureColumn(
        FeatureColumn, collections.namedtuple(
            'SerializationExampleFeatureColumn',
            ('dimension', 'parent', 'dtype', 'normalizer_fn'))):

      def get_config(self):
        # Create a dict from the namedtuple.
        # Python attribute literals can be directly copied from / to the config.
        # For example 'dimension', assuming it is an integer literal.
        config = dict(zip(self._fields, self))

        # (De)serialization of parent FeatureColumns should use the provided
        # (de)serialize_feature_column() methods that take care of de-duping.
        config['parent'] = serialize_feature_column(self.parent)

        # Many objects provide custom (de)serialization e.g: for tf.DType
        # tf.DType.name, tf.as_dtype() can be used.
        config['dtype'] = self.dtype.name

        # Non-trivial dependencies should be Keras-(de)serializable.
        config['normalizer_fn'] = generic_utils.serialize_keras_object(
            self.normalizer_fn)

        return config

      @classmethod
      def from_config(cls, config, custom_objects=None, columns_by_name=None):
        # This should do the inverse transform from `get_config` and construct
        # the namedtuple.
        kwargs = config.copy()
        kwargs['parent'] = deserialize_feature_column(
            config['parent'], custom_objects, columns_by_name)
        kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
        kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
            config['normalizer_fn'], custom_objects=custom_objects)
        return cls(**kwargs)
    ```

    Returns:
      A serializable Dict that can be used to deserialize the object with
      from_config.
    """
    # Public entry point delegates to the overridable `_get_config` hook.
    return self._get_config()

  def _get_config(self):
    # Serialization hook: concrete serializable columns override this.
    raise NotImplementedError('Must be implemented in subclasses.')

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """Creates a FeatureColumn from its config.

    This method should be the reverse of `get_config`, capable of instantiating
    the same FeatureColumn from the config dictionary. See `get_config` for an
    example of common (de)serialization practices followed in this file.

    TODO(b/118939620): This is a private method until consensus is reached on
    supporting object deserialization deduping within Keras.

    Args:
      config: A Dict config acquired with `get_config`.
      custom_objects: Optional dictionary mapping names (strings) to custom
        classes or functions to be considered during deserialization.
      columns_by_name: A Dict[String, FeatureColumn] of existing columns in
        order to avoid duplication. Should be passed to any calls to
        deserialize_feature_column().

    Returns:
      A FeatureColumn for the input config.
    """
    # Public entry point delegates to the overridable `_from_config` hook.
    return cls._from_config(config, custom_objects, columns_by_name)

  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    # Deserialization hook: concrete serializable columns override this.
    raise NotImplementedError('Must be implemented in subclasses.')
class DenseColumn(FeatureColumn):
  """Represents a column which can be represented as `Tensor`.

  Some examples of this type are: numeric_column, embedding_column,
  indicator_column.
  """
  # Pure interface: this class only adds the dense-tensor contract on top of
  # `FeatureColumn` and carries no state of its own.

  @abc.abstractproperty
  def variable_shape(self):
    """`TensorShape` of `get_dense_tensor`, without batch dimension."""
    pass

  @abc.abstractmethod
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns a `Tensor`.

    The output of this function will be used by model-builder-functions. For
    example the pseudo code of `input_layer` will be like:

    ```python
    def input_layer(features, feature_columns, ...):
      outputs = [fc.get_dense_tensor(...) for fc in feature_columns]
      return tf.concat(outputs)
    ```

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      `Tensor` of shape [batch_size] + `variable_shape`.
    """
    pass
def is_feature_column_v2(feature_columns):
  """Returns True if all feature columns are V2."""
  # Short-circuits exactly like the original loop: a column that is not a
  # `FeatureColumn` fails immediately without touching `_is_v2_column`.
  return all(
      isinstance(column, FeatureColumn) and
      column._is_v2_column  # pylint: disable=protected-access
      for column in feature_columns)
def _create_weighted_sum(column, transformation_cache, state_manager,
                         sparse_combiner, weight_var):
  """Creates a weighted sum for a dense/categorical column for linear_model."""
  # Categorical columns take the sparse (embedding-lookup based) path; every
  # other column is treated as dense.
  if isinstance(column, CategoricalColumn):
    return _create_categorical_column_weighted_sum(
        column, transformation_cache, state_manager, sparse_combiner,
        weight_var)
  return _create_dense_column_weighted_sum(
      column, transformation_cache, state_manager, weight_var)
def _create_dense_column_weighted_sum(column, transformation_cache,
                                      state_manager, weight_var):
  """Create a weighted sum of a dense column for linear_model."""
  dense_tensor = column.get_dense_tensor(transformation_cache, state_manager)
  # Flatten everything but the batch dimension so a single matmul applies the
  # per-element weights.
  flattened_size = column.variable_shape.num_elements()
  batch_size = array_ops.shape(dense_tensor)[0]
  flattened = array_ops.reshape(
      dense_tensor, shape=(batch_size, flattened_size))
  return math_ops.matmul(flattened, weight_var, name='weighted_sum')
class CategoricalColumn(FeatureColumn):
  """Represents a categorical feature.

  A categorical feature typically handled with a `tf.SparseTensor` of IDs.
  """

  # Pair of (ids, optional weights); `weight_tensor` of `None` means all
  # weights are taken to be 1 (see `get_sparse_tensors` below).
  IdWeightPair = collections.namedtuple(  # pylint: disable=invalid-name
      'IdWeightPair', ('id_tensor', 'weight_tensor'))

  @abc.abstractproperty
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    pass

  @abc.abstractmethod
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """Returns an IdWeightPair.

    `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
    weights.

    `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
    `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
    output of a `VarLenFeature` which is a ragged matrix.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    """
    pass
def _create_categorical_column_weighted_sum(
    column, transformation_cache, state_manager, sparse_combiner, weight_var):
  # pylint: disable=g-doc-return-or-yield,g-doc-args
  """Create a weighted sum of a categorical column for linear_model.

  Note to maintainer: As implementation details, the weighted sum is
  implemented via embedding_lookup_sparse toward efficiency. Mathematically,
  they are the same.

  To be specific, conceptually, categorical column can be treated as multi-hot
  vector. Say:

  ```python
    x = [0 0 1]  # categorical column input
    w = [a b c]  # weights
  ```
  The weighted sum is `c` in this case, which is same as `w[2]`.

  Another example is

  ```python
    x = [0 1 1]  # categorical column input
    w = [a b c]  # weights
  ```
  The weighted sum is `b + c` in this case, which is same as `w[2] + w[3]`.

  For both cases, we can implement weighted sum via embedding_lookup with
  sparse_combiner = "sum".
  """
  ids_and_weights = column.get_sparse_tensors(transformation_cache,
                                              state_manager)
  # Collapse all non-batch dimensions of the id tensor into one.
  flat_ids = sparse_ops.sparse_reshape(
      ids_and_weights.id_tensor,
      [array_ops.shape(ids_and_weights.id_tensor)[0], -1])
  flat_weights = ids_and_weights.weight_tensor
  if flat_weights is not None:
    flat_weights = sparse_ops.sparse_reshape(
        flat_weights, [array_ops.shape(flat_weights)[0], -1])
  return embedding_ops.safe_embedding_lookup_sparse(
      weight_var,
      flat_ids,
      sparse_weights=flat_weights,
      combiner=sparse_combiner,
      name='weighted_sum')
class SequenceDenseColumn(FeatureColumn):
  """Represents dense sequence data."""

  # Pair of (dense values, per-example sequence lengths) returned by
  # `get_sequence_dense_tensor`.
  TensorSequenceLengthPair = collections.namedtuple(  # pylint: disable=invalid-name
      'TensorSequenceLengthPair', ('dense_tensor', 'sequence_length'))

  @abc.abstractmethod
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """Returns a `TensorSequenceLengthPair`.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    """
    pass
class FeatureTransformationCache(object):
  """Handles caching of transformations while building the model.

  `FeatureColumn` specifies how to digest an input column to the network. Some
  feature columns require data transformations. This class caches those
  transformations.

  Some features may be used in more than one place. For example, one can use a
  bucketized feature by itself and a cross with it. In that case we
  should create only one bucketization op instead of creating ops for each
  feature column separately. To handle re-use of transformed columns,
  `FeatureTransformationCache` caches all previously transformed columns.

  Example:
  We're trying to use the following `FeatureColumn`s:

  ```python
  bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_buckets("keywords", ...)
  age_X_keywords = fc.crossed_column([bucketized_age, "keywords"])
  ... = linear_model(features,
                     [bucketized_age, keywords, age_X_keywords]
  ```

  If we transform each column independently, then we'll get duplication of
  bucketization (one for cross, one for bucketization itself).
  The `FeatureTransformationCache` eliminates this duplication.
  """

  def __init__(self, features):
    """Creates a `FeatureTransformationCache`.

    Args:
      features: A mapping from feature column to objects that are `Tensor` or
        `SparseTensor`, or can be converted to same via
        `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
        signifies a base feature (not-transformed). A `FeatureColumn` key
        means that this `Tensor` is the output of an existing `FeatureColumn`
        which can be reused.
    """
    # Shallow copy insulates the mapping itself (not its values) from later
    # mutation of the caller's dict.
    self._features = features.copy()
    # Cache of key -> transformed/converted tensor, filled lazily by `get`.
    self._feature_tensors = {}

  def get(self, key, state_manager):
    """Returns a `Tensor` for the given key.

    A `str` key is used to access a base feature (not-transformed). When a
    `FeatureColumn` is passed, the transformed feature is returned if it
    already exists, otherwise the given `FeatureColumn` is asked to provide its
    transformed output, which is then cached.

    Args:
      key: a `str` or a `FeatureColumn`.
      state_manager: A StateManager object that holds the FeatureColumn state.

    Returns:
      The transformed `Tensor` corresponding to the `key`.

    Raises:
      ValueError: if key is not found or a transformed `Tensor` cannot be
        computed.
    """
    if key in self._feature_tensors:
      # FeatureColumn is already transformed or converted.
      return self._feature_tensors[key]

    if key in self._features:
      # Raw (not-transformed) base feature: convert once and cache.
      feature_tensor = self._get_raw_feature_as_tensor(key)
      self._feature_tensors[key] = feature_tensor
      return feature_tensor

    # A string key that is neither cached nor present in the raw features is
    # an error; only `FeatureColumn` keys may fall through to transformation.
    if isinstance(key, six.string_types):
      raise ValueError('Feature {} is not in features dictionary.'.format(key))

    if not isinstance(key, FeatureColumn):
      raise TypeError('"key" must be either a "str" or "FeatureColumn". '
                      'Provided: {}'.format(key))

    column = key
    logging.debug('Transforming feature_column %s.', column)
    # Passing `self` lets the column pull (and thereby cache) any of its own
    # dependencies through this same cache.
    transformed = column.transform_feature(self, state_manager)
    if transformed is None:
      raise ValueError('Column {} is not supported.'.format(column.name))
    self._feature_tensors[column] = transformed
    return transformed

  def _get_raw_feature_as_tensor(self, key):
    """Gets the raw_feature (keyed by `key`) as `tensor`.

    The raw feature is converted to (sparse) tensor and maybe expand dim.

    For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if
    the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will
    error out as it is not supported.

    Args:
      key: A `str` key to access the raw feature.

    Returns:
      A `Tensor` or `SparseTensor`.

    Raises:
      ValueError: if the raw feature has rank 0.
    """
    raw_feature = self._features[key]
    feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        raw_feature)

    def expand_dims(input_tensor):
      # Input_tensor must have rank 1.
      if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
        return sparse_ops.sparse_reshape(
            input_tensor, [array_ops.shape(input_tensor)[0], 1])
      else:
        return array_ops.expand_dims(input_tensor, -1)

    rank = feature_tensor.get_shape().ndims
    if rank is not None:
      # Static rank is known: validate and expand at graph-construction time.
      if rank == 0:
        raise ValueError(
            'Feature (key: {}) cannot have rank 0. Given: {}'.format(
                key, feature_tensor))
      return feature_tensor if rank != 1 else expand_dims(feature_tensor)

    # Handle dynamic rank.
    # Rank is unknown statically, so the rank-0 check and the conditional
    # expansion must both happen at graph run time.
    with ops.control_dependencies([
        check_ops.assert_positive(
            array_ops.rank(feature_tensor),
            message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
                key, feature_tensor))]):
      return control_flow_ops.cond(
          math_ops.equal(1, array_ops.rank(feature_tensor)),
          lambda: expand_dims(feature_tensor),
          lambda: feature_tensor)
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
  """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.

  If `input_tensor` is already a `SparseTensor`, just return it.

  Args:
    input_tensor: A string or integer `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the resulting `SparseTensor`. If `None`, default value of
      `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `input_tensor`.

  Raises:
    ValueError: when `input_tensor`'s rank is `None`.
  """
  input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
      input_tensor)
  if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
    # Already sparse: nothing to drop or convert.
    return input_tensor
  with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
    if ignore_value is None:
      dtype = input_tensor.dtype
      if dtype == dtypes.string:
        # Exception due to TF strings are converted to numpy objects by default.
        ignore_value = ''
      elif dtype.is_integer:
        ignore_value = -1  # -1 has a special meaning of missing feature
      else:
        # NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
        # constructing a new numpy object of the given type, which yields the
        # default value for that type.
        ignore_value = dtype.as_numpy_dtype()
    ignore_value = math_ops.cast(
        ignore_value, input_tensor.dtype, name='ignore_value')
    # Keep only the cells that differ from the ignore value.
    kept_indices = array_ops.where_v2(
        math_ops.not_equal(input_tensor, ignore_value), name='indices')
    kept_values = array_ops.gather_nd(input_tensor, kept_indices, name='values')
    return sparse_tensor_lib.SparseTensor(
        indices=kept_indices,
        values=kept_values,
        dense_shape=array_ops.shape(
            input_tensor, out_type=dtypes.int64, name='dense_shape'))
def _normalize_feature_columns(feature_columns):
  """Normalizes the `feature_columns` input.

  This method converts the `feature_columns` to list type as best as it can. In
  addition, verifies the type and other parts of feature_columns, required by
  downstream library.

  Args:
    feature_columns: The raw feature columns, usually passed by users.

  Returns:
    The normalized feature column list.

  Raises:
    ValueError: for any invalid inputs, such as empty, duplicated names, etc.
  """
  # Accept a single column or an iterator by normalizing to a list first.
  if isinstance(feature_columns, FeatureColumn):
    feature_columns = [feature_columns]
  if isinstance(feature_columns, collections_abc.Iterator):
    feature_columns = list(feature_columns)
  if isinstance(feature_columns, dict):
    raise ValueError('Expected feature_columns to be iterable, found dict.')

  for column in feature_columns:
    if not isinstance(column, FeatureColumn):
      raise ValueError('Items of feature_columns must be a FeatureColumn. '
                       'Given (type {}): {}.'.format(type(column), column))
  if not feature_columns:
    raise ValueError('feature_columns must not be empty.')

  # Reject duplicate names: two columns with the same name would refer to the
  # same base feature.
  seen_by_name = {}
  for column in feature_columns:
    previous = seen_by_name.get(column.name)
    if previous is not None:
      raise ValueError('Duplicate feature column name found for columns: {} '
                       'and {}. This usually means that these columns refer to '
                       'same base feature. Either one must be discarded or a '
                       'duplicated but renamed item must be inserted in '
                       'features dict.'.format(column, previous))
    seen_by_name[column.name] = column

  return sorted(feature_columns, key=lambda fc: fc.name)
class NumericColumn(
    DenseColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'NumericColumn',
        ('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
  """see `numeric_column`."""

  @property
  def _is_v2_column(self):
    return True

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {
        self.key:
            parsing_ops.FixedLenFeature(self.shape, self.dtype,
                                        self.default_value)
    }

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    # Old (v1) API shim: delegates to the v2 property.
    return self.parse_example_spec

  def _transform_input_tensor(self, input_tensor):
    """Rejects sparse input, applies `normalizer_fn`, and casts to float32."""
    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError(
          'The corresponding Tensor of numerical column must be a Tensor. '
          'SparseTensor is not supported. key: {}'.format(self.key))
    if self.normalizer_fn is not None:
      input_tensor = self.normalizer_fn(input_tensor)
    return math_ops.cast(input_tensor, dtypes.float32)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    # Old (v1) API shim: same transform, sourced from the v1 inputs object.
    input_tensor = inputs.get(self.key)
    return self._transform_input_tensor(input_tensor)

  def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class.

    In this case, we apply the `normalizer_fn` to the input tensor.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      Normalized input tensor.

    Raises:
      ValueError: If a SparseTensor is passed in.
    """
    input_tensor = transformation_cache.get(self.key, state_manager)
    return self._transform_input_tensor(input_tensor)

  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    return tensor_shape.TensorShape(self.shape)

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _variable_shape(self):
    # Old (v1) API shim: delegates to the v2 property.
    return self.variable_shape

  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns dense `Tensor` representing numeric feature.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      Dense `Tensor` created within `transform_feature`.
    """
    # Feature has been already transformed. Return the intermediate
    # representation created by _transform_feature.
    return transformation_cache.get(self, state_manager)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # Old (v1) API shim: numeric columns hold no variables, so the
    # variable-related arguments are irrelevant here.
    del weight_collections
    del trainable
    return inputs.get(self)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]

  def get_config(self):
    """See 'FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # `normalizer_fn` and `dtype` are not directly JSON-serializable; encode
    # them via Keras object serialization and the dtype name respectively.
    config['normalizer_fn'] = generic_utils.serialize_keras_object(
        self.normalizer_fn)
    config['dtype'] = self.dtype.name
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    # Inverse of `get_config`: decode the Keras-serialized callable and the
    # dtype name back into objects.
    kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
        config['normalizer_fn'], custom_objects=custom_objects)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class BucketizedColumn(
    DenseColumn,
    CategoricalColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('BucketizedColumn',
                           ('source_column', 'boundaries'))):
  """See `bucketized_column`."""
  # Dual-role column: acts as a dense column (one-hot) and as a categorical
  # column (sparse bucket ids), hence the two base classes of each API version.

  @property
  def _is_v2_column(self):
    return (isinstance(self.source_column, FeatureColumn) and
            self.source_column._is_v2_column)  # pylint: disable=protected-access

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_bucketized'.format(self.source_column.name)

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    # Parsing is entirely delegated to the (numeric) source column.
    return self.source_column.parse_example_spec

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.source_column._parse_example_spec  # pylint: disable=protected-access

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Returns bucketized categorical `source_column` tensor."""
    source_tensor = inputs.get(self.source_column)
    return math_ops._bucketize(  # pylint: disable=protected-access
        source_tensor,
        boundaries=self.boundaries)

  def transform_feature(self, transformation_cache, state_manager):
    """Returns bucketized categorical `source_column` tensor."""
    source_tensor = transformation_cache.get(self.source_column, state_manager)
    return math_ops._bucketize(  # pylint: disable=protected-access
        source_tensor,
        boundaries=self.boundaries)

  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    # One extra bucket beyond the boundaries: values above the last boundary.
    return tensor_shape.TensorShape(
        tuple(self.source_column.shape) + (len(self.boundaries) + 1,))

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _variable_shape(self):
    return self.variable_shape

  def _get_dense_tensor_for_input_tensor(self, input_tensor):
    """One-hot encodes the bucket indices into a dense float tensor."""
    return array_ops.one_hot(
        indices=math_ops.cast(input_tensor, dtypes.int64),
        depth=len(self.boundaries) + 1,
        on_value=1.,
        off_value=0.)

  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns one hot encoded dense `Tensor`."""
    input_tensor = transformation_cache.get(self, state_manager)
    return self._get_dense_tensor_for_input_tensor(input_tensor)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # Old (v1) API shim; this column holds no variables, so the
    # variable-related arguments are irrelevant here.
    del weight_collections
    del trainable
    input_tensor = inputs.get(self)
    return self._get_dense_tensor_for_input_tensor(input_tensor)

  @property
  def num_buckets(self):
    """See `CategoricalColumn` base class."""
    # By construction, source_column is always one-dimensional.
    return (len(self.boundaries) + 1) * self.source_column.shape[0]

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets

  def _get_sparse_tensors_for_input_tensor(self, input_tensor):
    """Converts dense bucket indices into a sparse `IdWeightPair`."""
    batch_size = array_ops.shape(input_tensor)[0]
    # By construction, source_column is always one-dimensional.
    source_dimension = self.source_column.shape[0]
    # i1: batch-row index repeated once per source dimension.
    i1 = array_ops.reshape(
        array_ops.tile(
            array_ops.expand_dims(math_ops.range(0, batch_size), 1),
            [1, source_dimension]),
        (-1,))
    # i2: source-dimension index tiled across the batch.
    i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
    # Flatten the bucket indices and unique them across dimensions
    # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
    bucket_indices = (
        array_ops.reshape(input_tensor, (-1,)) +
        (len(self.boundaries) + 1) * i2)

    indices = math_ops.cast(
        array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
    dense_shape = math_ops.cast(
        array_ops.stack([batch_size, source_dimension]), dtypes.int64)
    sparse_tensor = sparse_tensor_lib.SparseTensor(
        indices=indices,
        values=bucket_indices,
        dense_shape=dense_shape)
    # Weight tensor is None: all buckets carry implicit weight 1.
    return CategoricalColumn.IdWeightPair(sparse_tensor, None)

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """Converts dense inputs to SparseTensor so downstream code can use it."""
    input_tensor = transformation_cache.get(self, state_manager)
    return self._get_sparse_tensors_for_input_tensor(input_tensor)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    """Converts dense inputs to SparseTensor so downstream code can use it."""
    del weight_collections
    del trainable
    input_tensor = inputs.get(self)
    return self._get_sparse_tensors_for_input_tensor(input_tensor)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.source_column]

  def get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['source_column'] = serialize_feature_column(self.source_column)
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['source_column'] = deserialize_feature_column(
        config['source_column'], custom_objects, columns_by_name)
    return cls(**kwargs)
class EmbeddingColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'EmbeddingColumn',
('categorical_column', 'dimension', 'combiner', 'initializer',
'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable',
'use_safe_embedding_lookup'))):
"""See `embedding_column`."""
def __new__(cls,
categorical_column,
dimension,
combiner,
initializer,
ckpt_to_load_from,
tensor_name_in_ckpt,
max_norm,
trainable,
use_safe_embedding_lookup=True):
return super(EmbeddingColumn, cls).__new__(
cls,
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable,
use_safe_embedding_lookup=use_safe_embedding_lookup)
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_embedding'.format(self.categorical_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def transform_feature(self, transformation_cache, state_manager):
"""Transforms underlying `categorical_column`."""
return transformation_cache.get(self.categorical_column, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape([self.dimension])
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def create_state(self, state_manager):
"""Creates the embedding lookup variable."""
default_num_buckets = (self.categorical_column.num_buckets
if self._is_v2_column
else self.categorical_column._num_buckets) # pylint: disable=protected-access
num_buckets = getattr(self.categorical_column, 'num_buckets',
default_num_buckets)
embedding_shape = (num_buckets, self.dimension)
state_manager.create_variable(
self,
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
trainable=self.trainable,
use_resource=True,
initializer=self.initializer)
  def _get_dense_tensor_internal_helper(self, sparse_tensors,
                                        embedding_weights):
    """Looks up `sparse_tensors` ids in `embedding_weights` and combines them.

    Args:
      sparse_tensors: An `IdWeightPair` of sparse id and weight tensors.
      embedding_weights: The embedding variable (possibly partitioned).

    Returns:
      The combined embedding lookup result.
    """
    sparse_ids = sparse_tensors.id_tensor
    sparse_weights = sparse_tensors.weight_tensor
    if self.ckpt_to_load_from is not None:
      # Warm-start the embedding table from a checkpoint tensor.
      to_restore = embedding_weights
      if isinstance(to_restore, variables.PartitionedVariable):
        to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
      checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
          self.tensor_name_in_ckpt: to_restore
      })
    sparse_id_rank = tensor_shape.dimension_value(
        sparse_ids.dense_shape.get_shape()[0])
    embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse
    # The plain (non-safe) lookup is only taken when explicitly requested AND
    # the id rank is statically known to be <= 2; otherwise keep the safe one.
    if (not self.use_safe_embedding_lookup and sparse_id_rank is not None and
        sparse_id_rank <= 2):
      embedding_lookup_sparse = embedding_ops.embedding_lookup_sparse
    # Return embedding lookup result.
    return embedding_lookup_sparse(
        embedding_weights,
        sparse_ids,
        sparse_weights,
        combiner=self.combiner,
        name='%s_weights' % self.name,
        max_norm=self.max_norm)
def _get_dense_tensor_internal(self, sparse_tensors, state_manager):
"""Private method that follows the signature of get_dense_tensor."""
embedding_weights = state_manager.get_variable(
self, name='embedding_weights')
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
  def _old_get_dense_tensor_internal(self, sparse_tensors, weight_collections,
                                     trainable):
    """Private method that follows the signature of _get_dense_tensor."""
    embedding_shape = (self.categorical_column._num_buckets, self.dimension)  # pylint: disable=protected-access
    # Ensure the variable is also tracked in GLOBAL_VARIABLES even when the
    # caller supplies a custom (non-empty) list of collections. NOTE: this
    # mutates the caller's list in place.
    if (weight_collections and
        ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
      weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
    embedding_weights = variable_scope.get_variable(
        name='embedding_weights',
        shape=embedding_shape,
        dtype=dtypes.float32,
        initializer=self.initializer,
        # Variable is trainable only when both the column and the caller ask.
        trainable=self.trainable and trainable,
        collections=weight_collections)
    return self._get_dense_tensor_internal_helper(sparse_tensors,
                                                  embedding_weights)
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns tensor after doing the embedding lookup.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      Embedding lookup tensor.

    Raises:
      ValueError: `categorical_column` is SequenceCategoricalColumn.
    """
    # Sequence columns require SequenceFeatures; reject them early with an
    # actionable message.
    if isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must not be of type SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Get sparse IDs and weights.
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    return self._get_dense_tensor_internal(sparse_tensors, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Deprecated v1 counterpart of `get_dense_tensor`."""
    # Reject sequence columns (both v1 and v2 flavors); they require
    # SequenceFeatures rather than DenseFeatures.
    if isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must not be of type _SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    sparse_tensors = self.categorical_column._get_sparse_tensors(  # pylint: disable=protected-access
        inputs, weight_collections, trainable)
    return self._old_get_dense_tensor_internal(sparse_tensors,
                                               weight_collections, trainable)
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """See `SequenceDenseColumn` base class."""
    # The inverse check of `get_dense_tensor`: only sequence categorical
    # columns are valid here.
    if not isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    dense_tensor = self._get_dense_tensor_internal(sparse_tensors,
                                                   state_manager)
    # Sequence length is recovered from the sparse ids so downstream layers
    # can mask out padded timesteps.
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sequence_dense_tensor(self,
                                 inputs,
                                 weight_collections=None,
                                 trainable=None):
    """Deprecated v1 counterpart of `get_sequence_dense_tensor`."""
    if not isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    dense_tensor = self._old_get_dense_tensor_internal(
        sparse_tensors,
        weight_collections=weight_collections,
        trainable=trainable)
    # Sequence length lets downstream layers mask out padded timesteps.
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    # The embedding depends only on its wrapped categorical column.
    return [self.categorical_column]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    # Start from the raw namedtuple fields, then replace the two entries that
    # are not directly serializable (the nested column and the initializer).
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    config['initializer'] = initializers.serialize(self.initializer)
    return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
if 'use_safe_embedding_lookup' not in config:
config['use_safe_embedding_lookup'] = True
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['initializer'] = initializers.deserialize(
config['initializer'], custom_objects=custom_objects)
return cls(**kwargs)
def _raise_shared_embedding_column_error():
raise ValueError('SharedEmbeddingColumns are not supported in '
'`linear_model` or `input_layer`. Please use '
'`DenseFeatures` or `LinearModel` instead.')
class SharedEmbeddingColumnCreator(tracking.AutoTrackable):
  """Factory whose instances own one embedding table shared by many columns.

  Every `SharedEmbeddingColumn` produced by `__call__` looks up into the
  variable returned by `embedding_weights`, which is created lazily once per
  graph.
  """

  def __init__(self,
               dimension,
               initializer,
               ckpt_to_load_from,
               tensor_name_in_ckpt,
               num_buckets,
               trainable,
               name='shared_embedding_column_creator',
               use_safe_embedding_lookup=True):
    # Arguments mirror the embedding-column construction parameters; they are
    # stored verbatim and consumed when the variable is first requested.
    self._dimension = dimension
    self._initializer = initializer
    self._ckpt_to_load_from = ckpt_to_load_from
    self._tensor_name_in_ckpt = tensor_name_in_ckpt
    self._num_buckets = num_buckets
    self._trainable = trainable
    self._name = name
    self._use_safe_embedding_lookup = use_safe_embedding_lookup
    # Map from graph keys to embedding_weight variables.
    self._embedding_weights = {}

  def __call__(self, categorical_column, combiner, max_norm):
    # Produce a column bound to this creator (and thus its shared weights).
    return SharedEmbeddingColumn(categorical_column, self, combiner, max_norm,
                                 self._use_safe_embedding_lookup)

  @property
  def embedding_weights(self):
    # Variables are cached per graph key so that re-entering the same graph
    # reuses the existing shared table instead of creating a duplicate.
    key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    if key not in self._embedding_weights:
      embedding_shape = (self._num_buckets, self._dimension)
      var = variable_scope.get_variable(
          name=self._name,
          shape=embedding_shape,
          dtype=dtypes.float32,
          initializer=self._initializer,
          trainable=self._trainable)
      if self._ckpt_to_load_from is not None:
        # Optionally warm-start the table from a checkpoint tensor.
        to_restore = var
        if isinstance(to_restore, variables.PartitionedVariable):
          to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
        checkpoint_utils.init_from_checkpoint(
            self._ckpt_to_load_from, {self._tensor_name_in_ckpt: to_restore})
      self._embedding_weights[key] = var
    return self._embedding_weights[key]

  @property
  def dimension(self):
    # Output dimension of the shared embedding.
    return self._dimension
class SharedEmbeddingColumn(
    DenseColumn,
    SequenceDenseColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    fc_old._SequenceDenseColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'SharedEmbeddingColumn',
        ('categorical_column', 'shared_embedding_column_creator', 'combiner',
         'max_norm', 'use_safe_embedding_lookup'))):
  """See `embedding_column`.

  Unlike `EmbeddingColumn`, the lookup table is owned by a
  `SharedEmbeddingColumnCreator` and may be shared with other columns. The
  deprecated v1 accessors deliberately raise, since shared embeddings are not
  supported by `linear_model` / `input_layer`.
  """

  def __new__(cls,
              categorical_column,
              shared_embedding_column_creator,
              combiner,
              max_norm,
              use_safe_embedding_lookup=True):
    # Default for `use_safe_embedding_lookup` mirrors the creator's default.
    return super(SharedEmbeddingColumn, cls).__new__(
        cls,
        categorical_column=categorical_column,
        shared_embedding_column_creator=shared_embedding_column_creator,
        combiner=combiner,
        max_norm=max_norm,
        use_safe_embedding_lookup=use_safe_embedding_lookup)

  @property
  def _is_v2_column(self):
    # Shared embedding columns only exist in the v2 API.
    return True

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_shared_embedding'.format(self.categorical_column.name)

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return self.categorical_column.parse_example_spec

  @property
  def _parse_example_spec(self):
    # v1 APIs cannot consume shared embeddings; fail loudly.
    return _raise_shared_embedding_column_error()

  def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class."""
    return transformation_cache.get(self.categorical_column, state_manager)

  def _transform_feature(self, inputs):
    # v1 path is unsupported for shared embeddings.
    return _raise_shared_embedding_column_error()

  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    # The dimension lives on the creator, which owns the shared table.
    return tensor_shape.TensorShape(
        [self.shared_embedding_column_creator.dimension])

  @property
  def _variable_shape(self):
    return _raise_shared_embedding_column_error()

  def _get_dense_tensor_internal(self, transformation_cache, state_manager):
    """Private method that follows the signature of _get_dense_tensor."""
    # This method is called from a variable_scope with name _var_scope_name,
    # which is shared among all shared embeddings. Open a name_scope here, so
    # that the ops for different columns have distinct names.
    with ops.name_scope(None, default_name=self.name):
      # Get sparse IDs and weights.
      sparse_tensors = self.categorical_column.get_sparse_tensors(
          transformation_cache, state_manager)
      sparse_ids = sparse_tensors.id_tensor
      sparse_weights = sparse_tensors.weight_tensor
      # The weights live on the creator and are shared across columns.
      embedding_weights = self.shared_embedding_column_creator.embedding_weights
      sparse_id_rank = tensor_shape.dimension_value(
          sparse_ids.dense_shape.get_shape()[0])
      embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse
      # The plain (non-safe) lookup is only taken when explicitly requested
      # AND the id rank is statically known to be <= 2.
      if (not self.use_safe_embedding_lookup and sparse_id_rank is not None and
          sparse_id_rank <= 2):
        embedding_lookup_sparse = (embedding_ops.embedding_lookup_sparse)
      # Return embedding lookup result.
      return embedding_lookup_sparse(
          embedding_weights,
          sparse_ids,
          sparse_weights,
          combiner=self.combiner,
          name='%s_weights' % self.name,
          max_norm=self.max_norm)

  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns the embedding lookup result."""
    # Sequence columns require SequenceFeatures; reject them early.
    if isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must not be of type SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    return self._get_dense_tensor_internal(transformation_cache, state_manager)

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    return _raise_shared_embedding_column_error()

  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """See `SequenceDenseColumn` base class."""
    if not isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    dense_tensor = self._get_dense_tensor_internal(transformation_cache,
                                                   state_manager)
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    # Sequence length lets downstream layers mask out padded timesteps.
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)

  def _get_sequence_dense_tensor(self,
                                 inputs,
                                 weight_collections=None,
                                 trainable=None):
    return _raise_shared_embedding_column_error()

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column]
def _check_shape(shape, key):
  """Validates `shape` for feature `key` and returns it as a tuple.

  A bare scalar is promoted to a one-element tuple. Every dimension must be
  a positive integer.

  Args:
    shape: An integer or a sequence of integers describing a feature shape.
    key: Feature key, used only in error messages.

  Returns:
    `shape` normalized to a tuple of ints.

  Raises:
    TypeError: If any dimension is not an integer.
    ValueError: If any dimension is smaller than 1.
  """
  assert shape is not None
  # Promote a bare scalar to a single-dimension sequence.
  normalized = tuple(shape) if nest.is_sequence(shape) else (shape,)
  for dim in normalized:
    if not isinstance(dim, int):
      raise TypeError('shape dimensions must be integer. '
                      'shape: {}, key: {}'.format(normalized, key))
    if dim < 1:
      raise ValueError('shape dimensions must be greater than 0. '
                       'shape: {}, key: {}'.format(normalized, key))
  return normalized
class HashedCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('HashedCategoricalColumn',
                           ('key', 'hash_bucket_size', 'dtype'))):
  """See `categorical_column_with_hash_bucket`.

  Values are deterministically hashed into `hash_bucket_size` buckets;
  non-string values are stringified before hashing.
  """

  @property
  def _is_v2_column(self):
    return True

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec

  def _transform_input_tensor(self, input_tensor):
    """Hashes the values in the feature_column."""
    if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError('SparseColumn input must be a SparseTensor.')
    fc_utils.assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))
    if self.dtype == dtypes.string:
      sparse_values = input_tensor.values
    else:
      # Non-string values are stringified so the string hash op can be used.
      sparse_values = string_ops.as_string(input_tensor.values)
    sparse_id_values = string_ops.string_to_hash_bucket_fast(
        sparse_values, self.hash_bucket_size, name='lookup')
    # Same indices/shape as the input; only the values are replaced by ids.
    return sparse_tensor_lib.SparseTensor(
        input_tensor.indices, sparse_id_values, input_tensor.dense_shape)

  def transform_feature(self, transformation_cache, state_manager):
    """Hashes the values in the feature_column."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    # Deprecated v1 path; same hashing transform as `transform_feature`.
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)

  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.hash_bucket_size

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    # Hashed columns carry no per-id weights, hence the `None` weight tensor.
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]

  def get_config(self):
    """See 'FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # DType objects are not serializable; store the dtype's name instead.
    config['dtype'] = self.dtype.name
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class VocabularyFileCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('VocabularyFileCategoricalColumn',
                           ('key', 'vocabulary_file', 'vocabulary_size',
                            'num_oov_buckets', 'dtype', 'default_value'))):
  """See `categorical_column_with_vocabulary_file`."""

  @property
  def _is_v2_column(self):
    return True

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec

  def _transform_input_tensor(self, input_tensor, state_manager=None):
    """Creates a lookup table for the vocabulary."""
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))
    fc_utils.assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_file` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.cast(input_tensor, dtypes.int64)
    name = '{}_lookup'.format(self.key)
    table = lookup_ops.index_table_from_file(
        vocabulary_file=self.vocabulary_file,
        num_oov_buckets=self.num_oov_buckets,
        vocab_size=self.vocabulary_size,
        default_value=self.default_value,
        key_dtype=key_dtype,
        name=name)
    if state_manager is not None:
      # Register the table so the state manager owns/tracks the resource.
      state_manager.add_resource(self, name, table)
    return table.lookup(input_tensor)

  def transform_feature(self, transformation_cache, state_manager):
    """Creates a lookup table for the vocabulary."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor, state_manager)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    # Deprecated v1 path: no state manager, so the table is untracked.
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)

  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    # In-vocabulary ids plus the out-of-vocabulary buckets.
    return self.vocabulary_size + self.num_oov_buckets

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]

  def get_config(self):
    """See 'FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # DType objects are not serializable; store the dtype's name instead.
    config['dtype'] = self.dtype.name
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class VocabularyListCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'VocabularyListCategoricalColumn',
        ('key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'))
):
  """See `categorical_column_with_vocabulary_list`."""

  @property
  def _is_v2_column(self):
    return True

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec

  def _transform_input_tensor(self, input_tensor, state_manager=None):
    """Creates a lookup table for the vocabulary list."""
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))
    fc_utils.assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_tensor` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.cast(input_tensor, dtypes.int64)
    name = '{}_lookup'.format(self.key)
    table = lookup_ops.index_table_from_tensor(
        vocabulary_list=tuple(self.vocabulary_list),
        default_value=self.default_value,
        num_oov_buckets=self.num_oov_buckets,
        dtype=key_dtype,
        name=name)
    if state_manager is not None:
      # Register the table so the state manager owns/tracks the resource.
      state_manager.add_resource(self, name, table)
    return table.lookup(input_tensor)

  def transform_feature(self, transformation_cache, state_manager):
    """Creates a lookup table for the vocabulary list."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor, state_manager)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    # Deprecated v1 path: no state manager, so the table is untracked.
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)

  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    # In-vocabulary ids plus the out-of-vocabulary buckets.
    return len(self.vocabulary_list) + self.num_oov_buckets

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]

  def get_config(self):
    """See 'FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # DType objects are not serializable; store the dtype's name instead.
    config['dtype'] = self.dtype.name
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class IdentityCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('IdentityCategoricalColumn',
                           ('key', 'number_buckets', 'default_value'))):
  """See `categorical_column_with_identity`.

  Integer inputs are used directly as categorical ids; out-of-range ids are
  replaced with `default_value` when one is given.
  """

  @property
  def _is_v2_column(self):
    return True

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec

  def _transform_input_tensor(self, input_tensor):
    """Returns a SparseTensor with identity values."""
    if not input_tensor.dtype.is_integer:
      raise ValueError(
          'Invalid input, not integer. key: {} dtype: {}'.format(
              self.key, input_tensor.dtype))
    values = input_tensor.values
    if input_tensor.values.dtype != dtypes.int64:
      values = math_ops.cast(values, dtypes.int64, name='values')
    if self.default_value is not None:
      # NOTE(review): re-casts from the original values; redundant with the
      # cast above but harmless (same result).
      values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')
      num_buckets = math_ops.cast(
          self.num_buckets, dtypes.int64, name='num_buckets')
      zero = math_ops.cast(0, dtypes.int64, name='zero')
      # Assign default for out-of-range values.
      values = array_ops.where_v2(
          math_ops.logical_or(
              values < zero, values >= num_buckets, name='out_of_range'),
          array_ops.fill(
              dims=array_ops.shape(values),
              value=math_ops.cast(self.default_value, dtypes.int64),
              name='default_values'), values)
    # Indices and shape are preserved; only the values may be remapped.
    return sparse_tensor_lib.SparseTensor(
        indices=input_tensor.indices,
        values=values,
        dense_shape=input_tensor.dense_shape)

  def transform_feature(self, transformation_cache, state_manager):
    """Returns a SparseTensor with identity values."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)

  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.number_buckets

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]

  def get_config(self):
    """See 'FeatureColumn` base class."""
    # All fields are plain values, so the namedtuple maps directly to config.
    return dict(zip(self._fields, self))

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    return cls(**kwargs)
class WeightedCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'WeightedCategoricalColumn',
        ('categorical_column', 'weight_feature_key', 'dtype'))):
  """See `weighted_categorical_column`.

  Pairs the wrapped categorical column's ids with a weight read from the
  feature keyed by `weight_feature_key`.
  """

  @property
  def _is_v2_column(self):
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column)  # pylint: disable=protected-access

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_weighted_by_{}'.format(
        self.categorical_column.name, self.weight_feature_key)

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    config = self.categorical_column.parse_example_spec
    # The weight key must not collide with a feature the wrapped column
    # already parses.
    if self.weight_feature_key in config:
      raise ValueError('Parse config {} already exists for {}.'.format(
          config[self.weight_feature_key], self.weight_feature_key))
    config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
    return config

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    config = self.categorical_column._parse_example_spec  # pylint: disable=protected-access
    if self.weight_feature_key in config:
      raise ValueError('Parse config {} already exists for {}.'.format(
          config[self.weight_feature_key], self.weight_feature_key))
    config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
    return config

  @property
  def num_buckets(self):
    """See `DenseColumn` base class."""
    return self.categorical_column.num_buckets

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.categorical_column._num_buckets  # pylint: disable=protected-access

  def _transform_weight_tensor(self, weight_tensor):
    # Normalizes the raw weight feature to a float sparse tensor.
    if weight_tensor is None:
      raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
    weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        weight_tensor)
    if self.dtype != weight_tensor.dtype.base_dtype:
      raise ValueError('Bad dtype, expected {}, but got {}.'.format(
          self.dtype, weight_tensor.dtype))
    if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
      # The weight tensor can be a regular Tensor. In this case, sparsify it.
      weight_tensor = _to_sparse_input_and_drop_ignore_values(
          weight_tensor, ignore_value=0.0)
    if not weight_tensor.dtype.is_floating:
      weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
    return weight_tensor

  def transform_feature(self, transformation_cache, state_manager):
    """Applies weights to tensor generated from `categorical_column`'."""
    weight_tensor = transformation_cache.get(self.weight_feature_key,
                                             state_manager)
    sparse_weight_tensor = self._transform_weight_tensor(weight_tensor)
    sparse_categorical_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.categorical_column, state_manager))
    # Ids and weights travel together as a 2-tuple.
    return (sparse_categorical_tensor, sparse_weight_tensor)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Applies weights to tensor generated from `categorical_column`'."""
    weight_tensor = inputs.get(self.weight_feature_key)
    weight_tensor = self._transform_weight_tensor(weight_tensor)
    return (inputs.get(self.categorical_column), weight_tensor)

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    tensors = transformation_cache.get(self, state_manager)
    return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    tensors = inputs.get(self)
    return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column, self.weight_feature_key]

  def get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    # DType objects are not serializable; store the dtype's name instead.
    config['dtype'] = self.dtype.name
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class CrossedColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('CrossedColumn',
                           ('keys', 'hash_bucket_size', 'hash_key'))):
  """See `crossed_column`."""
  @property
  def _is_v2_column(self):
    # The cross is v2-compatible only when every non-string leaf key is a
    # v2 `FeatureColumn`.
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        continue
      if not isinstance(key, FeatureColumn):
        return False
      if not key._is_v2_column:  # pylint: disable=protected-access
        return False
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    feature_names = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, (FeatureColumn, fc_old._FeatureColumn)):  # pylint: disable=protected-access
        feature_names.append(key.name)
      else:  # key must be a string
        feature_names.append(key)
    # Sorted so the generated name is independent of key ordering.
    return '_X_'.join(sorted(feature_names))
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    # Merge the parsing specs of every key; bare string keys parse as
    # variable-length string features.
    config = {}
    for key in self.keys:
      if isinstance(key, FeatureColumn):
        config.update(key.parse_example_spec)
      elif isinstance(key, fc_old._FeatureColumn):  # pylint: disable=protected-access
        config.update(key._parse_example_spec)  # pylint: disable=protected-access
      else:  # key must be a string
        config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
    return config
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def transform_feature(self, transformation_cache, state_manager):
    """Generates a hashed sparse cross from the input tensors."""
    feature_tensors = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        feature_tensors.append(transformation_cache.get(key, state_manager))
      elif isinstance(key, (fc_old._CategoricalColumn, CategoricalColumn)):  # pylint: disable=protected-access
        ids_and_weights = key.get_sparse_tensors(transformation_cache,
                                                 state_manager)
        # Weighted columns are rejected: the cross has no way to combine
        # per-id weights.
        if ids_and_weights.weight_tensor is not None:
          raise ValueError(
              'crossed_column does not support weight_tensor, but the given '
              'column populates weight_tensor. '
              'Given column: {}'.format(key.name))
        feature_tensors.append(ids_and_weights.id_tensor)
      else:
        raise ValueError('Unsupported column type. Given: {}'.format(key))
    return sparse_ops.sparse_cross_hashed(
        inputs=feature_tensors,
        num_buckets=self.hash_bucket_size,
        hash_key=self.hash_key)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Generates a hashed sparse cross from the input tensors."""
    # Old-style twin of `transform_feature`, reading from `inputs` instead
    # of a transformation cache.
    feature_tensors = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        feature_tensors.append(inputs.get(key))
      elif isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn)):  # pylint: disable=protected-access
        ids_and_weights = key._get_sparse_tensors(inputs)  # pylint: disable=protected-access
        if ids_and_weights.weight_tensor is not None:
          raise ValueError(
              'crossed_column does not support weight_tensor, but the given '
              'column populates weight_tensor. '
              'Given column: {}'.format(key.name))
        feature_tensors.append(ids_and_weights.id_tensor)
      else:
        raise ValueError('Unsupported column type. Given: {}'.format(key))
    return sparse_ops.sparse_cross_hashed(
        inputs=feature_tensors,
        num_buckets=self.hash_bucket_size,
        hash_key=self.hash_key)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.hash_bucket_size
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    # A cross never carries weights, hence the None weight tensor.
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    """See `CategoricalColumn` base class."""
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return list(self.keys)
  def get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['keys'] = tuple([serialize_feature_column(fc) for fc in self.keys])
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['keys'] = tuple([
        deserialize_feature_column(c, custom_objects, columns_by_name)
        for c in config['keys']
    ])
    return cls(**kwargs)
def _collect_leaf_level_keys(cross):
  """Collects base keys by expanding all nested crosses.
  Args:
    cross: A `CrossedColumn`.
  Returns:
    A list of strings or `CategoricalColumn` instances.
  """
  flattened = []
  for key in cross.keys:
    if isinstance(key, CrossedColumn):
      # Recurse into nested crosses so only base-level keys remain.
      flattened.extend(_collect_leaf_level_keys(key))
    else:
      flattened.append(key)
  return flattened
def _prune_invalid_ids(sparse_ids, sparse_weights):
  """Prune invalid IDs (< 0) from the input ids and weights."""
  is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
  if sparse_weights is not None:
    # NOTE(review): and-ing with an all-True tensor shaped like the weights
    # is a logical no-op; presumably it exists to enforce that ids and
    # weights have matching value shapes -- TODO confirm.
    is_id_valid = math_ops.logical_and(
        is_id_valid,
        array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
  sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
  if sparse_weights is not None:
    # Drop the weight entries at the same positions as the dropped ids.
    sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
  return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
  """Prune invalid weights (< 0) from the input ids and weights."""
  if sparse_weights is None:
    # Nothing to prune when no weights were supplied.
    return sparse_ids, sparse_weights
  # Keep only entries whose weight is strictly positive, in both tensors.
  keep_mask = math_ops.greater(sparse_weights.values, 0)
  sparse_ids = sparse_ops.sparse_retain(sparse_ids, keep_mask)
  sparse_weights = sparse_ops.sparse_retain(sparse_weights, keep_mask)
  return sparse_ids, sparse_weights
class IndicatorColumn(
    DenseColumn,
    SequenceDenseColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    fc_old._SequenceDenseColumn,  # pylint: disable=protected-access
    collections.namedtuple('IndicatorColumn', ('categorical_column'))):
  """Represents a one-hot column for use in deep networks.
  Args:
    categorical_column: A `CategoricalColumn` which is created by
      `categorical_column_with_*` function.
  """
  @property
  def _is_v2_column(self):
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column)  # pylint: disable=protected-access
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_indicator'.format(self.categorical_column.name)
  def _transform_id_weight_pair(self, id_weight_pair, size):
    """Densifies an (ids, weights) pair into a float tensor of width `size`."""
    id_tensor = id_weight_pair.id_tensor
    weight_tensor = id_weight_pair.weight_tensor
    # If the underlying column is weighted, return the input as a dense tensor.
    if weight_tensor is not None:
      weighted_column = sparse_ops.sparse_merge(
          sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=int(size))
      # Remove (?, -1) index.
      weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
                                                weighted_column.dense_shape)
      # Use scatter_nd to merge duplicated indices if existed,
      # instead of sparse_tensor_to_dense.
      return array_ops.scatter_nd(weighted_column.indices,
                                  weighted_column.values,
                                  weighted_column.dense_shape)
    # Unweighted path: missing entries become -1, which one_hot maps to an
    # all-zeros row.
    dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
        id_tensor, default_value=-1)
    # One hot must be float for tf.concat reasons since all other inputs to
    # input_layer are float32.
    one_hot_id_tensor = array_ops.one_hot(
        dense_id_tensor, depth=size, on_value=1.0, off_value=0.0)
    # Reduce to get a multi-hot per example.
    return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
  def transform_feature(self, transformation_cache, state_manager):
    """Returns dense `Tensor` representing feature.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      Transformed feature `Tensor`.
    Raises:
      ValueError: if input rank is not known at graph building time.
    """
    id_weight_pair = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    return self._transform_id_weight_pair(id_weight_pair,
                                          self.variable_shape[-1])
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    # Old-style twin of `transform_feature`.
    id_weight_pair = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    return self._transform_id_weight_pair(id_weight_pair,
                                          self._variable_shape[-1])
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    # Parsing is delegated entirely to the wrapped categorical column.
    return self.categorical_column.parse_example_spec
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.categorical_column._parse_example_spec  # pylint: disable=protected-access
  @property
  def variable_shape(self):
    """Returns a `TensorShape` representing the shape of the dense `Tensor`."""
    if isinstance(self.categorical_column, FeatureColumn):
      return tensor_shape.TensorShape([1, self.categorical_column.num_buckets])
    else:
      return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])  # pylint: disable=protected-access
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _variable_shape(self):
    return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])  # pylint: disable=protected-access
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns dense `Tensor` representing feature.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      Dense `Tensor` created within `transform_feature`.
    Raises:
      ValueError: If `categorical_column` is a `SequenceCategoricalColumn`.
    """
    if isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In indicator_column: {}. '
          'categorical_column must not be of type SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Feature has been already transformed. Return the intermediate
    # representation created by transform_feature.
    return transformation_cache.get(self, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    del weight_collections
    del trainable
    if isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In indicator_column: {}. '
          'categorical_column must not be of type _SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Feature has been already transformed. Return the intermediate
    # representation created by transform_feature.
    return inputs.get(self)
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """See `SequenceDenseColumn` base class."""
    if not isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In indicator_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Feature has been already transformed. Return the intermediate
    # representation created by transform_feature.
    dense_tensor = transformation_cache.get(self, state_manager)
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    # Sequence length is derived from the sparse ids, not the dense tensor.
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sequence_dense_tensor(self,
                                 inputs,
                                 weight_collections=None,
                                 trainable=None):
    # Do nothing with weight_collections and trainable since no variables are
    # created in this function.
    del weight_collections
    del trainable
    if not isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In indicator_column: {}. '
          'categorical_column must be of type _SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Feature has been already transformed. Return the intermediate
    # representation created by _transform_feature.
    dense_tensor = inputs.get(self)
    sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    return cls(**kwargs)
def _verify_static_batch_size_equality(tensors, columns):
  """Verify equality between static batch sizes.
  Args:
    tensors: iterable of input tensors.
    columns: Corresponding feature columns.
  Raises:
    ValueError: in case of mismatched batch sizes.
  """
  # expected_batch_size is a `Dimension` object; it stays None until the
  # first tensor with a statically known batch size is seen.
  # (Fixed identifier typo: was `bath_size_column_index`.)
  expected_batch_size = None
  batch_size_column_index = 0
  for i, tensor in enumerate(tensors):
    batch_size = tensor_shape.Dimension(
        tensor_shape.dimension_value(tensor.shape[0]))
    if batch_size.value is None:
      continue  # Dynamic batch size; nothing to verify statically.
    if expected_batch_size is None:
      # Remember which column established the expectation, for the message.
      batch_size_column_index = i
      expected_batch_size = batch_size
    elif not expected_batch_size.is_compatible_with(batch_size):
      raise ValueError(
          'Batch size (first dimension) of each feature must be same. '
          'Batch size of columns ({}, {}): ({}, {})'.format(
              columns[batch_size_column_index].name, columns[i].name,
              expected_batch_size, batch_size))
class SequenceCategoricalColumn(
    CategoricalColumn,
    fc_old._SequenceCategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('SequenceCategoricalColumn',
                           ('categorical_column'))):
  """Represents sequences of categorical data."""
  @property
  def _is_v2_column(self):
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column)  # pylint: disable=protected-access
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    # The sequence wrapper reuses the wrapped column's name verbatim.
    return self.categorical_column.name
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return self.categorical_column.parse_example_spec
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.categorical_column._parse_example_spec  # pylint: disable=protected-access
  def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class."""
    return self.categorical_column.transform_feature(transformation_cache,
                                                     state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    return self.categorical_column._transform_feature(inputs)  # pylint: disable=protected-access
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.categorical_column.num_buckets
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.categorical_column._num_buckets  # pylint: disable=protected-access
  def _get_sparse_tensors_helper(self, sparse_tensors):
    # Reshapes the wrapped column's sparse output to rank 3 before returning.
    id_tensor = sparse_tensors.id_tensor
    weight_tensor = sparse_tensors.weight_tensor
    # Expands third dimension, if necessary so that embeddings are not
    # combined during embedding lookup. If the tensor is already 3D, leave
    # as-is.
    shape = array_ops.shape(id_tensor)
    # Compute the third dimension explicitly instead of setting it to -1, as
    # that doesn't work for dynamically shaped tensors with 0-length at runtime.
    # This happens for empty sequences.
    target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]
    id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)
    if weight_tensor is not None:
      # Weights must track the ids' layout exactly.
      weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)
    return CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """Returns an IdWeightPair.
    `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
    weights.
    `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
    `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
    output of a `VarLenFeature` which is a ragged matrix.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    """
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    return self._get_sparse_tensors_helper(sparse_tensors)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    return self._get_sparse_tensors_helper(sparse_tensors)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column]
  def get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    return cls(**kwargs)
def _check_config_keys(config, expected_keys):
"""Checks that a config has all expected_keys."""
if set(config.keys()) != set(expected_keys):
raise ValueError('Invalid config: {}, expected keys: {}'.format(
config, expected_keys))
def _standardize_and_copy_config(config):
"""Returns a shallow copy of config with lists turned to tuples.
Keras serialization uses nest to listify everything.
This causes problems with the NumericColumn shape, which becomes
unhashable. We could try to solve this on the Keras side, but that
would require lots of tracking to avoid changing existing behavior.
Instead, we ensure here that we revive correctly.
Args:
config: dict that will be used to revive a Feature Column
Returns:
Shallow copy of config with lists turned to tuples.
"""
kwargs = config.copy()
for k, v in kwargs.items():
if isinstance(v, list):
kwargs[k] = tuple(v)
return kwargs
def _sanitize_column_name_for_variable_scope(name):
"""Sanitizes user-provided feature names for use as variable scopes."""
invalid_char = re.compile('[^A-Za-z0-9_.\\-]')
return invalid_char.sub('_', name)
|
{
"content_hash": "1467ad60a62e91e3570e1ad4e4c4d8bd",
"timestamp": "",
"source": "github",
"line_count": 4727,
"max_line_length": 128,
"avg_line_length": 40.067696213243075,
"alnum_prop": 0.6746092925026399,
"repo_name": "renyi533/tensorflow",
"id": "7a6bb73d121ced2debd5c9c7993feff14c550eb5",
"size": "190089",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/feature_column/feature_column_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "903309"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82507951"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871425"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "988219"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2073744"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319021"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37811412"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696058"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3655758"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.models import User
from hasi.gadgets.models import Gadget, GadgetSubdevice, UserGadget
def gadgets(request):
    """Render the user's gadget dashboard.

    Loads the gadgets assigned to the logged-in user, grouped into the
    left / center / right columns by their ``UserGadget.position`` value
    (0, 10 and 20 respectively).  Anonymous visitors are redirected to the
    login page with a ``next`` parameter pointing back here.
    """
    if not request.user.is_authenticated():
        # Guard clause replaces the redundant nested check of the original.
        return HttpResponseRedirect('/login.html?next=%s' % request.path)
    gadgets = Gadget.objects.filter(usergadget__users=request.user)
    return render_to_response(
        'gadgets/index.html',
        {'gadgets_left': gadgets.filter(usergadget__position=0),
         'gadgets_center': gadgets.filter(usergadget__position=10),
         'gadgets_right': gadgets.filter(usergadget__position=20)},
        context_instance = RequestContext(request))
|
{
"content_hash": "b0658309271e88895ac532953a9f550b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 43.74074074074074,
"alnum_prop": 0.6875529212531752,
"repo_name": "tscholze/py-hasi-home-analytical-system-interface",
"id": "c238f1661bb41a88eb535ce25a1be9b9c1e4aea5",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hasi/gadgets/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45942"
},
{
"name": "HTML",
"bytes": "53572"
},
{
"name": "JavaScript",
"bytes": "37679"
},
{
"name": "Python",
"bytes": "159855"
}
],
"symlink_target": ""
}
|
"""Diagnostics support for HomeKit Controller."""
from __future__ import annotations
from typing import Any
from aiohomekit.model.characteristics.characteristic_types import CharacteristicsTypes
from homeassistant.components.diagnostics import REDACTED, async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.device_registry import DeviceEntry
from .connection import HKDevice
from .const import KNOWN_DEVICES
REDACTED_CHARACTERISTICS = [
CharacteristicsTypes.SERIAL_NUMBER,
]
REDACTED_CONFIG_ENTRY_KEYS = [
"AccessoryIP",
"iOSDeviceLTSK",
]
REDACTED_STATE = ["access_token", "entity_picture"]
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for a config entry."""
    # No device filter: the helper reports every device on the connection.
    return _async_get_diagnostics(hass, entry, device=None)
async def async_get_device_diagnostics(
    hass: HomeAssistant, entry: ConfigEntry, device: DeviceEntry
) -> dict[str, Any]:
    """Return diagnostics for a device entry."""
    # Scope the report to the single requested device.
    return _async_get_diagnostics(hass, entry, device=device)
@callback
def _async_get_diagnostics_for_device(
    hass: HomeAssistant, device: DeviceEntry
) -> dict[str, Any]:
    """Build the diagnostics payload for a single registry device.

    Includes the registry metadata for the device plus one entry per
    associated entity (disabled entities included), each with a redacted
    snapshot of its current state.
    """
    data: dict[str, Any] = {}
    data["name"] = device.name
    data["model"] = device.model
    # Fixed output-key typo: was "manfacturer".
    data["manufacturer"] = device.manufacturer
    data["sw_version"] = device.sw_version
    data["hw_version"] = device.hw_version
    entities = data["entities"] = []
    hass_entities = er.async_entries_for_device(
        er.async_get(hass),
        device_id=device.id,
        include_disabled_entities=True,
    )
    # Stable ordering keeps diagnostics reports diff-friendly.
    hass_entities.sort(key=lambda entry: entry.original_name or "")
    for entity_entry in hass_entities:
        state = hass.states.get(entity_entry.entity_id)
        state_dict = None
        if state:
            state_dict = async_redact_data(state.as_dict(), REDACTED_STATE)
            # The event context is not useful in a diagnostics dump.
            state_dict.pop("context", None)
        entities.append(
            {
                "original_name": entity_entry.original_name,
                "original_device_class": entity_entry.original_device_class,
                "entity_category": entity_entry.entity_category,
                "original_icon": entity_entry.original_icon,
                "icon": entity_entry.icon,
                "unit_of_measurement": entity_entry.unit_of_measurement,
                "device_class": entity_entry.device_class,
                "disabled": entity_entry.disabled,
                "disabled_by": entity_entry.disabled_by,
                "state": state_dict,
            }
        )
    return data
@callback
def _async_get_diagnostics(
    hass: HomeAssistant, entry: ConfigEntry, device: DeviceEntry | None = None
) -> dict[str, Any]:
    """Return diagnostics for a config entry."""
    # Resolve the paired accessory connection from the pairing id.
    hkid = entry.data["AccessoryPairingID"]
    connection: HKDevice = hass.data[KNOWN_DEVICES][hkid]
    data: dict[str, Any] = {
        "config-entry": {
            "title": entry.title,
            "version": entry.version,
            "data": async_redact_data(entry.data, REDACTED_CONFIG_ENTRY_KEYS),
        }
    }
    # This is the raw data as returned by homekit
    # It is roughly equivalent to what is in .storage/homekit_controller-entity-map
    # But it also has the latest values seen by the polling or events
    data["entity-map"] = accessories = connection.entity_map.serialize()
    # It contains serial numbers, which we should strip out
    for accessory in accessories:
        for service in accessory.get("services", []):
            for char in service.get("characteristics", []):
                if char["type"] in REDACTED_CHARACTERISTICS:
                    char["value"] = REDACTED
    if device:
        data["device"] = _async_get_diagnostics_for_device(hass, device)
    else:
        # No specific device requested: include every registry device that
        # this connection tracks (skipping any stale registry ids).
        device_registry = dr.async_get(hass)
        devices = data["devices"] = []
        for device_id in connection.devices.values():
            if not (device := device_registry.async_get(device_id)):
                continue
            devices.append(_async_get_diagnostics_for_device(hass, device))
    return data
|
{
"content_hash": "1e8ad292783e2dec4859e50b4cd749d2",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 86,
"avg_line_length": 33.27131782945737,
"alnum_prop": 0.651910531220876,
"repo_name": "GenericStudent/home-assistant",
"id": "f83ce7604cf74643cf0cf210ec286890e0a4335a",
"size": "4292",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit_controller/diagnostics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
class error(Exception):
    """Exception raised by the locator helpers in this module when a
    package, help file, application or executable cannot be found."""
    pass
import sys # at least we can count on this!
def FileExists(fname):
    """Check if a file exists. Returns true or false.

    Note: uses os.stat, so directories also count as "existing",
    matching the original behaviour.
    """
    import os
    try:
        os.stat(fname)
        return 1
    except os.error:
        # Was Python-2-only "except os.error, details:" with an unused
        # binding; this form works on both Python 2 and 3.
        return 0
def IsPackageDir(path, packageName, knownFileName):
    """Given a path, a ni package name, and possibly a known file name in
    the root of the package, see if this path is good.
    """
    import os
    # With no known file to probe for, "." tests the directory itself.
    probe = knownFileName if knownFileName is not None else "."
    package_root = os.path.join(path, packageName)
    return FileExists(os.path.join(package_root, probe))
def IsDebug():
    """Return "_d" if we're running a debug version.
    This is to be used within DLL names when locating them.
    """
    import imp
    # A debug interpreter registers the '_d.pyd' extension-module suffix.
    suffixes = [item[0] for item in imp.get_suffixes()]
    if '_d.pyd' in suffixes:
        return '_d'
    return ''
def FindPackagePath(packageName, knownFileName, searchPaths):
    """Find a package.
    Given a ni style package name, check the package is registered.
    First place looked is the registry for an existing entry. Then
    the searchPaths are searched.
    """
    import regutil, os
    registered = regutil.GetRegisteredNamedPath(packageName)
    if registered and IsPackageDir(registered, packageName, knownFileName):
        # The currently registered path is still valid - keep it as-is.
        return registered, None
    # Fall back to probing each candidate search path in turn.
    for candidate in searchPaths:
        if IsPackageDir(candidate, packageName, knownFileName):
            resolved = os.path.abspath(candidate)
            return resolved, resolved
    raise error("The package %s can not be located" % packageName)
def FindHelpPath(helpFile, helpDesc, searchPaths):
    # Locate the directory containing helpFile.  The Windows "Help" registry
    # entry for helpDesc is trusted first; otherwise each search path (and
    # its "Help" subdirectory) is probed.  Raises `error` if nothing matches.
    # See if the current registry entry is OK
    import os, win32api, win32con
    try:
        key = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Windows\\Help", 0, win32con.KEY_ALL_ACCESS)
        try:
            try:
                path = win32api.RegQueryValueEx(key, helpDesc)[0]
                if FileExists(os.path.join(path, helpFile)):
                    return os.path.abspath(path)
            except win32api.error:
                pass # no registry entry.
        finally:
            # Always release the registry handle, found or not.
            key.Close()
    except win32api.error:
        pass
    for pathLook in searchPaths:
        if FileExists(os.path.join(pathLook, helpFile)):
            return os.path.abspath(pathLook)
        # Also accept a "Help" subdirectory beneath each search path.
        pathLook = os.path.join(pathLook, "Help")
        if FileExists(os.path.join( pathLook, helpFile)):
            return os.path.abspath(pathLook)
    raise error("The help file %s can not be located" % helpFile)
def FindAppPath(appName, knownFileName, searchPaths):
    """Find an application.
    First place looked is the registry for an existing entry. Then
    the searchPaths are searched.

    Returns None when the currently registered path already contains
    knownFileName (so the caller knows no re-registration is needed),
    otherwise the absolute path of the directory where it was found.
    Raises `error` when the file cannot be located anywhere.
    """
    # Look in the first path.
    import regutil, os
    regPath = regutil.GetRegisteredNamedPath(appName)
    if regPath:
        # Only the first component of the (possibly ;-separated)
        # registered path is checked.
        # (The original re-tested `regPath and` here - redundant, since
        # we are already inside `if regPath:`.  The unused `string`
        # import has been dropped too.)
        pathLook = regPath.split(";")[0]
        if FileExists(os.path.join(pathLook, knownFileName)):
            return None # The currently registered one is good.
    # Search down the search paths.
    for pathLook in searchPaths:
        if FileExists(os.path.join(pathLook, knownFileName)):
            # Found it
            return os.path.abspath(pathLook)
    raise error("The file %s can not be located for application %s" % (knownFileName, appName))
def FindPythonExe(exeAlias, possibleRealNames, searchPaths):
    """Find an exe.
    Returns the full path to the .exe, and a boolean indicating if the current
    registered entry is OK. We don't trust the already registered version even
    if it exists - it may be wrong (ie, for a different Python version)
    """
    import win32api, regutil, string, os, sys
    if possibleRealNames is None:
        possibleRealNames = exeAlias
    # Look first in Python's home.
    found = os.path.join(sys.prefix, possibleRealNames)
    if not FileExists(found): # for developers
        if "64 bit" in sys.version:
            found = os.path.join(sys.prefix, "PCBuild", "amd64", possibleRealNames)
        else:
            found = os.path.join(sys.prefix, "PCBuild", possibleRealNames)
    if not FileExists(found):
        # Fall back to the search paths (may prompt the user via a dialog).
        found = LocateFileName(possibleRealNames, searchPaths)
    registered_ok = 0
    try:
        # Compare what we found against the App Paths registration for
        # the alias; a mismatch means the registry needs updating.
        registered = win32api.RegQueryValue(regutil.GetRootKey(), regutil.GetAppPathsKey() + "\\" + exeAlias)
        registered_ok = found==registered
    except win32api.error:
        # No existing registration - registered_ok stays false.
        pass
    return found, registered_ok
def QuotedFileName(fname):
    """Given a filename, return a quoted version if necessary.

    A filename containing a space is wrapped in double-quotes so it can
    be safely embedded in a command line; otherwise it is returned
    unchanged.
    """
    # A membership test is clearer (and cheaper) than catching the
    # ValueError from str.index; the two unused imports are dropped.
    if " " in fname:
        return '"%s"' % fname
    return fname
def LocateFileName(fileNamesString, searchPaths):
    """Locate a file name, anywhere on the search path.
    If the file can not be located, prompt the user to find it for us
    (using a common OpenFile dialog)
    Raises KeyboardInterrupt if the user cancels.

    fileNamesString may hold several candidate names separated by ";".
    Returns the absolute path of the first candidate found.
    """
    import regutil, string, os
    fileNames = fileNamesString.split(";")
    for path in searchPaths:
        for fileName in fileNames:
            try:
                retPath = os.path.join(path, fileName)
                # os.stat succeeding means the file exists.
                os.stat(retPath)
                break
            except os.error:
                retPath = None
        if retPath:
            break
    # for/else: only entered when no search path yielded a hit.
    else:
        fileName = fileNames[0]
        try:
            import win32ui, win32con
        except ImportError:
            raise error("Need to locate the file %s, but the win32ui module is not available\nPlease run the program again, passing as a parameter the path to this file." % fileName)
        # Display a common dialog to locate the file.
        flags=win32con.OFN_FILEMUSTEXIST
        ext = os.path.splitext(fileName)[1]
        filter = "Files of requested type (*%s)|*%s||" % (ext,ext)
        dlg = win32ui.CreateFileDialog(1,None,fileName,flags,filter,None)
        dlg.SetOFNTitle("Locate " + fileName)
        if dlg.DoModal() != win32con.IDOK:
            raise KeyboardInterrupt("User cancelled the process")
        retPath = dlg.GetPathName()
    return os.path.abspath(retPath)
def LocatePath(fileName, searchPaths):
    """Like LocateFileName, but returns a directory only.
    """
    import os
    located = LocateFileName(fileName, searchPaths)
    directory = os.path.split(located)[0]
    return os.path.abspath(directory)
def LocateOptionalPath(fileName, searchPaths):
    """Like LocatePath, but returns None if the user cancels.
    """
    try:
        result = LocatePath(fileName, searchPaths)
    except KeyboardInterrupt:
        # User cancelled the locate dialog - treat as "not found".
        result = None
    return result
def LocateOptionalFileName(fileName, searchPaths = None):
    """Like LocateFileName, but returns None if the user cancels.
    """
    try:
        result = LocateFileName(fileName, searchPaths)
    except KeyboardInterrupt:
        # User cancelled the locate dialog - treat as "not found".
        result = None
    return result
def LocatePythonCore(searchPaths):
    """Locate and validate the core Python directories.

    Returns an (installPath, [libPath, corePath]) tuple: the install
    root plus the paths that should be used as the core (ie, un-named)
    portion of the Python path.
    Raises `error` if either directory cannot be located.
    """
    import os, regutil
    # Start from the currently registered core path, falling back to
    # the current directory.
    currentPath = regutil.GetRegisteredNamedPath(None)
    if currentPath:
        presearchPaths = currentPath.split(";")
    else:
        presearchPaths = [os.path.abspath(".")]
    libPath = None
    # "os.py" marks the standard-library directory.
    for path in presearchPaths:
        if FileExists(os.path.join(path, "os.py")):
            libPath = path
            break
    if libPath is None and searchPaths is not None:
        libPath = LocatePath("os.py", searchPaths)
    if libPath is None:
        raise error("The core Python library could not be located.")
    corePath = None
    suffix = IsDebug()
    # "unicodedata(_d).pyd" marks the binary-extension directory.
    for path in presearchPaths:
        if FileExists(os.path.join(path, "unicodedata%s.pyd" % suffix)):
            corePath = path
            break
    if corePath is None and searchPaths is not None:
        corePath = LocatePath("unicodedata%s.pyd" % suffix, searchPaths)
    if corePath is None:
        raise error("The core Python path could not be located.")
    installPath = os.path.abspath(os.path.join(libPath, ".."))
    return installPath, [libPath, corePath]
def FindRegisterPackage(packageName, knownFile, searchPaths, registryAppName = None):
    """Find and Register a package.
    Assumes the core registry setup correctly.
    In addition, if the location located by the package is already
    in the **core** path, then an entry is registered, but no path.
    (no other paths are checked, as the application whose path was used
    may later be uninstalled. This should not happen with the core)

    Returns the located path, or None (after printing diagnostics)
    when the package could not be found.
    """
    import regutil, string
    if not packageName: raise error("A package name must be supplied")
    corePaths = regutil.GetRegisteredNamedPath(None).split(";")
    if not searchPaths: searchPaths = corePaths
    registryAppName = registryAppName or packageName
    try:
        pathLook, pathAdd = FindPackagePath(packageName, knownFile, searchPaths)
        # pathAdd is None when the existing registration is already good.
        if pathAdd is not None:
            # An empty path still registers the name, marking the
            # package as known without extending sys.path.
            if pathAdd in corePaths:
                pathAdd = ""
            regutil.RegisterNamedPath(registryAppName, pathAdd)
        return pathLook
    except error, details:
        print "*** The %s package could not be registered - %s" % (packageName, details)
        print "*** Please ensure you have passed the correct paths on the command line."
        print "*** - For packages, you should pass a path to the packages parent directory,"
        print "*** - and not the package directory itself..."
def FindRegisterApp(appName, knownFiles, searchPaths):
    """Find and register an application's path(s).
    Assumes the core registry setup correctly.

    knownFiles may be a single file name or a list of them; the
    directory of each located file is joined into the registered path
    for appName.  Prints diagnostics and returns early on failure.
    """
    import regutil, string
    # Accept a bare string as a one-element list.
    if type(knownFiles)==type(''):
        knownFiles = [knownFiles]
    paths=[]
    try:
        for knownFile in knownFiles:
            # FindAppPath returns None when the existing registration
            # already covers this file.
            pathLook = FindAppPath(appName, knownFile, searchPaths)
            if pathLook:
                paths.append(pathLook)
    except error, details:
        print "*** ", details
        return
    regutil.RegisterNamedPath(appName, ";".join(paths))
def FindRegisterPythonExe(exeAlias, searchPaths, actualFileNames = None):
    """Find and register a Python executable (not necessarily *the*
    python.exe).  Assumes the core registry is set up correctly.

    Returns the full path of the executable that was found; it is only
    (re)registered when the existing registration is missing or wrong.
    (The docstring previously said "package" - a copy/paste slip - and
    an unused `string` import has been dropped.)
    """
    import regutil
    fname, ok = FindPythonExe(exeAlias, actualFileNames, searchPaths)
    if not ok:
        regutil.RegisterPythonExe(fname, exeAlias)
    return fname
def FindRegisterHelpFile(helpFile, searchPaths, helpDesc = None ):
    """Locate a help file via FindHelpPath and register it.
    Prints a message and returns None when the file cannot be found.
    """
    import regutil
    try:
        pathLook = FindHelpPath(helpFile, helpDesc, searchPaths)
    except error, details:
        print "*** ", details
        return
    # print "%s found at %s" % (helpFile, pathLook)
    regutil.RegisterHelpFile(helpFile, pathLook, helpDesc)
def SetupCore(searchPaths):
    """Setup the core Python information in the registry.
    This function makes no assumptions about the current state of sys.path.
    After this function has completed, you should have access to the standard
    Python library, and the standard Win32 extensions
    """
    import sys
    # Extend sys.path first so the imports below can succeed even on a
    # broken installation.
    for path in searchPaths:
        sys.path.append(path)
    import os
    import regutil, win32api,win32con
    installPath, corePaths = LocatePythonCore(searchPaths)
    # Register the core Pythonpath.
    print corePaths
    regutil.RegisterNamedPath(None, ';'.join(corePaths))
    # Register the install path.
    hKey = win32api.RegCreateKey(regutil.GetRootKey() , regutil.BuildDefaultPythonKey())
    try:
        # Core Paths.
        win32api.RegSetValue(hKey, "InstallPath", win32con.REG_SZ, installPath)
    finally:
        win32api.RegCloseKey(hKey)
    # Register the win32 core paths.
    win32paths = os.path.abspath( os.path.split(win32api.__file__)[0]) + ";" + \
                 os.path.abspath( os.path.split(LocateFileName("win32con.py;win32con.pyc", sys.path ) )[0] )
    # NOTE(review): win32paths is computed but never used below - looks
    # vestigial; confirm against history before removing.
    # Python has builtin support for finding a "DLLs" directory, but
    # not a PCBuild. Having it in the core paths means it is ignored when
    # an EXE not in the Python dir is hosting us - so we add it as a named
    # value
    check = os.path.join(sys.prefix, "PCBuild")
    if "64 bit" in sys.version:
        check = os.path.join(check, "amd64")
    if os.path.isdir(check):
        regutil.RegisterNamedPath("PCBuild",check)
def RegisterShellInfo(searchPaths):
    """Registers key parts of the Python installation with the Windows Shell.
    Assumes a valid, minimal Python installation exists
    (ie, SetupCore() has been previously successfully run)
    """
    import regutil, win32con
    suffix = IsDebug()
    # Set up a pointer to the .exe's
    exePath = FindRegisterPythonExe("Python%s.exe" % suffix, searchPaths)
    # Associate .py with a "Python.File" class and an "Open"/"&Run"
    # shell verb that launches the interpreter on the clicked file.
    regutil.SetRegistryDefaultValue(".py", "Python.File", win32con.HKEY_CLASSES_ROOT)
    regutil.RegisterShellCommand("Open", QuotedFileName(exePath)+" \"%1\" %*", "&Run")
    regutil.SetRegistryDefaultValue("Python.File\\DefaultIcon", "%s,0" % exePath, win32con.HKEY_CLASSES_ROOT)
    FindRegisterHelpFile("Python.hlp", searchPaths, "Main Python Documentation")
    FindRegisterHelpFile("ActivePython.chm", searchPaths, "Main Python Documentation")
    # We consider the win32 core, as it contains all the win32 api type
    # stuff we need.
    # FindRegisterApp("win32", ["win32con.pyc", "win32api%s.pyd" % suffix], searchPaths)
usage = """\
regsetup.py - Setup/maintain the registry for Python apps.
Run without options, (but possibly search paths) to repair a totally broken
python registry setup. This should allow other options to work.
Usage: %s [options ...] paths ...
-p packageName -- Find and register a package. Looks in the paths for
a sub-directory with the name of the package, and
adds a path entry for the package.
-a appName -- Unconditionally add an application name to the path.
A new path entry is create with the app name, and the
paths specified are added to the registry.
-c -- Add the specified paths to the core Pythonpath.
If a path appears on the core path, and a package also
needs that same path, the package will not bother
registering it. Therefore, By adding paths to the
core path, you can avoid packages re-registering the same path.
-m filename -- Find and register the specific file name as a module.
Do not include a path on the filename!
--shell -- Register everything with the Win95/NT shell.
--upackage name -- Unregister the package
--uapp name -- Unregister the app (identical to --upackage)
--umodule name -- Unregister the module
--description -- Print a description of the usage.
--examples -- Print examples of usage.
""" % sys.argv[0]
description="""\
If no options are processed, the program attempts to validate and set
the standard Python path to the point where the standard library is
available. This can be handy if you move Python to a new drive/sub-directory,
in which case most of the options would fail (as they need at least string.py,
os.py etc to function.)
Running without options should repair Python well enough to run with
the other options.
paths are search paths that the program will use to seek out a file.
For example, when registering the core Python, you may wish to
provide paths to non-standard places to look for the Python help files,
library files, etc.
See also the "regcheck.py" utility which will check and dump the contents
of the registry.
"""
examples="""\
Examples:
"regsetup c:\\wierd\\spot\\1 c:\\wierd\\spot\\2"
Attempts to setup the core Python. Looks in some standard places,
as well as the 2 wierd spots to locate the core Python files (eg, Python.exe,
python14.dll, the standard library and Win32 Extensions.
"regsetup -a myappname . .\subdir"
Registers a new Pythonpath entry named myappname, with "C:\\I\\AM\\HERE" and
"C:\\I\\AM\\HERE\subdir" added to the path (ie, all args are converted to
absolute paths)
"regsetup -c c:\\my\\python\\files"
Unconditionally add "c:\\my\\python\\files" to the 'core' Python path.
"regsetup -m some.pyd \\windows\\system"
Register the module some.pyd in \\windows\\system as a registered
module. This will allow some.pyd to be imported, even though the
windows system directory is not (usually!) on the Python Path.
"regsetup --umodule some"
Unregister the module "some". This means normal import rules then apply
for that module.
"""
if __name__=='__main__':
    if len(sys.argv)>1 and sys.argv[1] in ['/?','-?','-help','-h']:
        print usage
    elif len(sys.argv)==1 or not sys.argv[1][0] in ['/','-']:
        # No args, or useful args.
        # Repair mode: every positional argument becomes an extra
        # search path for the core files.
        searchPath = sys.path[:]
        for arg in sys.argv[1:]:
            searchPath.append(arg)
        # Good chance we are being run from the "regsetup.py" directory.
        # Typically this will be "\somewhere\win32\Scripts" and the
        # "somewhere" and "..\Lib" should also be searched.
        searchPath.append("..\\Build")
        searchPath.append("..\\Lib")
        searchPath.append("..")
        searchPath.append("..\\..")
        # for developers:
        # also search somewhere\lib, ..\build, and ..\..\build
        searchPath.append("..\\..\\lib")
        searchPath.append("..\\build")
        if "64 bit" in sys.version:
            searchPath.append("..\\..\\pcbuild\\amd64")
        else:
            searchPath.append("..\\..\\pcbuild")
        print "Attempting to setup/repair the Python core"
        SetupCore(searchPath)
        RegisterShellInfo(searchPath)
        FindRegisterHelpFile("PyWin32.chm", searchPath, "Pythonwin Reference")
        # Check the registry.
        print "Registration complete - checking the registry..."
        import regcheck
        regcheck.CheckRegistry()
    else:
        # Option mode: positional args collected as search paths, then
        # each option is dispatched in turn.
        searchPaths = []
        import getopt, string
        opts, args = getopt.getopt(sys.argv[1:], 'p:a:m:c',
            ['shell','upackage=','uapp=','umodule=','description','examples'])
        for arg in args:
            searchPaths.append(arg)
        for o,a in opts:
            if o=='--description':
                print description
            if o=='--examples':
                print examples
            if o=='--shell':
                print "Registering the Python core."
                RegisterShellInfo(searchPaths)
            if o=='-p':
                print "Registering package", a
                FindRegisterPackage(a,None,searchPaths)
            if o in ['--upackage', '--uapp']:
                import regutil
                print "Unregistering application/package", a
                regutil.UnregisterNamedPath(a)
            if o=='-a':
                import regutil
                path = ";".join(searchPaths)
                print "Registering application", a,"to path",path
                regutil.RegisterNamedPath(a,path)
            if o=='-c':
                if not len(searchPaths):
                    raise error("-c option must provide at least one additional path")
                import win32api, regutil
                # Merge the new paths into the registered core path,
                # skipping duplicates.
                currentPaths = regutil.GetRegisteredNamedPath(None).split(";")
                oldLen = len(currentPaths)
                for newPath in searchPaths:
                    if newPath not in currentPaths:
                        currentPaths.append(newPath)
                if len(currentPaths)!=oldLen:
                    print "Registering %d new core paths" % (len(currentPaths)-oldLen)
                    regutil.RegisterNamedPath(None,";".join(currentPaths))
                else:
                    print "All specified paths are already registered."
|
{
"content_hash": "fe81f82bef920021a19a2ed5efb2285f",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 182,
"avg_line_length": 38.59574468085106,
"alnum_prop": 0.6450836924927333,
"repo_name": "Titulacion-Sistemas/PythonTitulacion-EV",
"id": "b28848d8c98f641ea911eadfdd0199b0206fb48c",
"size": "19994",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "Lib/site-packages/pywin32-219-py2.7-win32.egg/scripts/regsetup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "C",
"bytes": "469338"
},
{
"name": "C++",
"bytes": "93276"
},
{
"name": "CSS",
"bytes": "173812"
},
{
"name": "JavaScript",
"bytes": "203291"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "17198855"
},
{
"name": "Shell",
"bytes": "2237"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "Visual Basic",
"bytes": "904"
},
{
"name": "XSLT",
"bytes": "154751"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
import numpy as np
# Synthetic training data: y is roughly 2*x plus Gaussian noise, so the
# fitted weight should converge towards 2.0.
trX = np.linspace(-1, 1, 101)
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33 # create a y value which is approximately linear but with some random noise
X = tf.placeholder("float") # create symbolic variables
Y = tf.placeholder("float")
def model(X, w):
    """Linear model with no bias term: prediction = X * w."""
    return tf.multiply(X, w) # lr is just X*w so this model line is pretty simple
w = tf.Variable(0.0, name="weights") # create a shared variable (like theano.shared) for the weight matrix
y_model = model(X, w)
cost = tf.square(Y - y_model) # use square error for cost function
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost) # construct an optimizer to minimize cost and fit line to my data
# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize variables (in this case just variable W)
    tf.global_variables_initializer().run()
    # 100 epochs of per-sample (stochastic) gradient descent.
    for i in range(100):
        for (x, y) in zip(trX, trY):
            sess.run(train_op, feed_dict={X: x, Y: y})
    # Print the learned weight (expected to be close to 2).
    print(sess.run(w))
|
{
"content_hash": "2168df3bedd892e82a1a5cc2d1a63ea5",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 131,
"avg_line_length": 33.38709677419355,
"alnum_prop": 0.6879227053140097,
"repo_name": "AdityaSoni19031997/Machine-Learning",
"id": "5d93b5401027b9befd814e91728dc28c0269857e",
"size": "1035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__TensorFlow__/Linear_Regression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "77547639"
},
{
"name": "Python",
"bytes": "903037"
},
{
"name": "Shell",
"bytes": "531"
}
],
"symlink_target": ""
}
|
import os
# import kivy
import kivy
kivy.require('1.9.1')
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.floatlayout import FloatLayout
# import whole core managers
from pocketthrone.managers.locator import L
from pocketthrone.managers.eventmanager import EventManager
from pocketthrone.managers.filemanager import FileManager
from pocketthrone.managers.inputmanager import InputManager
from pocketthrone.managers.gameloopmanager import GameLoopManager
from pocketthrone.managers.modmanager import ModManager
from pocketthrone.managers.unitmanager import UnitManager
from pocketthrone.managers.citymanager import CityManager
from pocketthrone.managers.mapmanager import MapManager
from pocketthrone.managers.widgetmanager import WidgetManager
from pocketthrone.managers.playermanager import PlayerManager
# import entities
from pocketthrone.entities.unit import Unit
from pocketthrone.entities.tile import Tile
from pocketthrone.entities.building import Building
from pocketthrone.entities.event import *
# import widget classes
from pocketthrone.widgets.mapwidget import MapWidget
from pocketthrone.widgets.bottombar import BottomBar
from pocketthrone.widgets.sidebar import SideBar
from pocketthrone.tools.maploader import MapLoader
class PocketThroneApp(App):
    """Kivy application entry point.

    Boots the core managers, loads the configured mod and map, builds
    the root UI and starts the game loop.  The initialization order in
    build() matters: managers must exist before widgets register with
    them.
    """
    # set mod and map to load here
    MOD_NAME = "ancientlies"
    MAP_NAME = "highland_bridge"
    # auto-set display size before cstarting
    def build_config(self, config):
        config.setdefaults('graphics', {
            'width': 800,
            'height': 600
        })
    # build & return "root" widget
    def build(self):
        # initialize game basics
        self.bootstrap()
        self.initialize_game_dir()
        self.initialize_mod()
        self.initialize_map()
        self.initialize_manager_locator()
        # initialize L
        # initialize user interface
        self._build_user_interface()
        root_layout = L.WidgetManager.get_widget("root_layout")
        # start game loop and return root FloatLayout
        self._start_game_loop()
        return root_layout
    def on_start(self):
        print "Application started"
    # boot game core
    def bootstrap(self):
        # Input, loop and widget managers are created first; later init
        # steps and widgets rely on them being present on L.
        L.InputManager = InputManager()
        L.GameLoopManager = GameLoopManager()
        L.WidgetManager = WidgetManager()
    # initialize game root directory
    def initialize_game_dir(self):
        # Game root is two levels above this file (the package parent).
        game_root = os.path.abspath(__file__ + "/../../")
        L.RootDirectory = game_root
        FileManager.set_game_root(game_root)
    # initialize L manager holder
    def initialize_manager_locator(self):
        # set basic managers in L class
        # Manager initialization inside L holder class
        L.PlayerManager = PlayerManager()
        L.UnitManager = UnitManager()
        L.CityManager = CityManager()
    # load and set Mod to start
    def initialize_mod(self):
        L.ModManager = ModManager(mod_name = self.MOD_NAME)
    # load and set TileMap to start
    def initialize_map(self):
        L.MapManager = MapManager(map_name=self.MAP_NAME)
        tilemap = L.MapManager.load_map(self.MAP_NAME)
        L.TileMap = tilemap
    # make the root kivy Layout and register it in WidgetManager
    def _build_user_interface(self):
        # create root kivy Layout
        root_layout = FloatLayout(pos=(0,0), size=(800, 600))
        L.WidgetManager.register("root_layout", root_layout)
        # add MapWidget to root and WidgetManager
        mapwidget = MapWidget()
        bottombar = BottomBar()
        root_layout.add_widget(mapwidget)
        root_layout.add_widget(bottombar)
        L.WidgetManager.register("mapwidget", mapwidget)
        L.WidgetManager.register("bottombar", bottombar)
        return root_layout
    def _start_game_loop(self):
        # start loop & first turn
        L.GameLoopManager.run()
        L.PlayerManager.start_game()
|
{
"content_hash": "e356686db90c5136303c3c6a17865330",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 65,
"avg_line_length": 31.47787610619469,
"alnum_prop": 0.7745290975541186,
"repo_name": "herrschr/prey-game",
"id": "a40257d4b797519af3e3d8a78ab5cba8f8c980d9",
"size": "3608",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pocketthrone/app.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "109218"
},
{
"name": "Shell",
"bytes": "22"
}
],
"symlink_target": ""
}
|
"""
Single VsOne Chip Match Interface
For VsMany Interaction
Interaction for looking at matches between a single query and database annotation
Main development file
CommandLine:
python -m ibeis.viz.interact.interact_matches --test-show_coverage --show
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import numpy as np
import plottool as pt
import six
from plottool import interact_helpers as ih
from ibeis import viz
from ibeis.algo.hots import scoring
from ibeis.algo.hots import hstypes
from ibeis.viz import viz_helpers as vh
from ibeis.viz import viz_hough
from ibeis.viz import viz_chip
from plottool import interact_matches
from ibeis.viz.interact.interact_chip import ishow_chip
(print, rrr, profile) = ut.inject2(__name__, '[interact_matches]')
def testdata_match_interact(**kwargs):
    """
    Build a started MatchInteraction against the default test database
    (used by doctests and manual testing).

    CommandLine:
        python -m ibeis.viz.interact.interact_matches --test-testdata_match_interact --show --db PZ_MTEST --qaid 3

    Example:
        >>> # VIZ_DOCTEST
        >>> from ibeis.viz.interact.interact_matches import * # NOQA
        >>> import plottool as pt
        >>> kwargs = {}
        >>> mx = ut.get_argval('--mx', type_=int, default=None)
        >>> self = testdata_match_interact(mx=mx, **kwargs)
        >>> pt.show_if_requested()
    """
    import ibeis
    qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', t=['default:Knorm=3'])
    ibs = qreq_.ibs
    cm = qreq_.execute()[0]
    cm.sortself()
    aid2 = None
    # dodraw=False: start() below does the drawing explicitly.
    self = MatchInteraction(ibs, cm, aid2, mode=1, dodraw=False, qreq_=qreq_, **kwargs)
    self.start()
    return self
# TODO inherit from AbstractInteraction
@six.add_metaclass(ut.ReloadingMetaclass)
class MatchInteraction(interact_matches.MatchInteraction2):
    """
    Plots a chip result and sets up callbacks for interaction.
    SeeAlso:
        plottool.interact_matches.MatchInteraction2
    CommandLine:
        python -m ibeis.viz.interact.interact_matches --test-testdata_match_interact --show --db PZ_MTEST --qaid 3
    """
    def __init__(self, ibs, cm, aid2=None, fnum=None,
                 qreq_=None, figtitle='Match Interaction',
                 **kwargs):
        """Unpack match data for (cm.qaid, aid2) and delegate drawing
        state to MatchInteraction2.  When aid2 is None the top-ranked
        database annotation is used; when aid2 has no match in cm,
        empty feature arrays are substituted so the interaction still
        opens.
        """
        #print('[ibs] MatchInteraction.__init__')
        self.ibs = ibs
        self.cm = cm
        self.qreq_ = qreq_
        # Unpack Args
        if aid2 is None:
            index = 0
            # FIXME: no sortself
            cm.sortself()
            self.rank = index
        else:
            index = cm.daid2_idx.get(aid2, None)
            # TODO: rank?
            self.rank = None
        if index is not None:
            self.qaid = self.cm.qaid
            self.daid = self.cm.daid_list[index]
            fm = self.cm.fm_list[index]
            fk = self.cm.fk_list[index]
            fsv = self.cm.fsv_list[index]
            if self.cm.fs_list is None:
                fs_list = self.cm.get_fsv_prod_list()
            else:
                fs_list = self.cm.fs_list
            fs = None if fs_list is None else fs_list[index]
            H1 = None if self.cm.H_list is None else cm.H_list[index]
            self.score = None if self.cm.score_list is None else self.cm.score_list[index]
        else:
            # aid2 was requested but is not among the matches: fall back
            # to empty per-feature arrays.
            self.qaid = self.cm.qaid
            self.daid = aid2
            fm = np.empty((0, 2), dtype=hstypes.FM_DTYPE)
            fk = np.empty(0, dtype=hstypes.FK_DTYPE)
            fsv = np.empty((0, 2), dtype=hstypes.FS_DTYPE)
            fs = np.empty(0, dtype=hstypes.FS_DTYPE)
            H1 = None
            self.score = None
        # Read properties
        self.query_config2_ = (None if self.qreq_ is None else
                               self.qreq_.extern_query_config2)
        self.data_config2_ = (None if self.qreq_ is None else
                              self.qreq_.extern_data_config2)
        rchip1 = vh.get_chips(ibs, [self.qaid], config2_=self.query_config2_)[0]
        rchip2 = vh.get_chips(ibs, [self.daid], config2_=self.data_config2_)[0]
        kpts1 = ibs.get_annot_kpts([self.qaid], config2_=self.query_config2_)[0]
        kpts2 = ibs.get_annot_kpts([self.daid], config2_=self.data_config2_)[0]
        vecs1 = ibs.get_annot_vecs([self.qaid], config2_=self.query_config2_)[0]
        vecs2 = ibs.get_annot_vecs([self.daid], config2_=self.data_config2_)[0]
        self.figtitle = figtitle
        self.kwargs = kwargs
        self.fnum2 = pt.next_fnum()
        super(MatchInteraction, self).__init__(rchip1, rchip2, kpts1, kpts2,
                                               fm, fs, fsv, vecs1, vecs2, H1,
                                               H2=None, fk=fk, fnum=fnum,
                                               **kwargs)
    #def plot(self, fnum, pnum):
    def chipmatch_view(self, fnum=None, pnum=(1, 1, 1), verbose=None, **kwargs_):
        """
        just visualizes the matches using some type of lines
        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-chipmatch_view --show
        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import * # NOQA
            >>> self = testdata_match_interact()
            >>> self.chipmatch_view()
            >>> pt.show_if_requested()
        """
        if fnum is None:
            fnum = self.fnum
        if verbose is None:
            verbose = ut.VERBOSE
        ibs = self.ibs
        aid = self.daid
        qaid = self.qaid
        figtitle = self.figtitle
        # drawing mode draw: with/without lines/feats
        # mode 0: neither, mode 1: ellipses only, mode 2: ellipses+lines
        mode = kwargs_.get('mode', self.mode)
        draw_ell = mode >= 1
        draw_lines = mode == 2
        #self.mode = (self.mode + 1) % 3
        pt.figure(fnum=fnum, docla=True, doclf=True)
        show_matches_kw = self.kwargs.copy()
        show_matches_kw.update(
            dict(fnum=fnum, pnum=pnum, draw_lines=draw_lines,
                 draw_ell=draw_ell, colorbar_=True, vert=self.vert))
        show_matches_kw.update(kwargs_)
        if self.warp_homog:
            show_matches_kw['H1'] = self.H1
        #show_matches_kw['score'] = self.score
        show_matches_kw['rawscore'] = self.score
        show_matches_kw['aid2_raw_rank'] = self.rank
        tup = viz.viz_matches.show_matches2(ibs, self.qaid, self.daid,
                                            self.fm, self.fs,
                                            qreq_=self.qreq_,
                                            **show_matches_kw)
        ax, xywh1, xywh2 = tup
        self.xywh2 = xywh2
        pt.set_figtitle(figtitle + ' ' + vh.get_vsstr(qaid, aid))
    def sv_view(self, dodraw=True):
        """ spatial verification view
        """
        #fnum = viz.FNUMS['special']
        aid = self.daid
        fnum = pt.next_fnum()
        fig = pt.figure(fnum=fnum, docla=True, doclf=True)
        # Detach any stale click handler before showing the new figure.
        ih.disconnect_callback(fig, 'button_press_event')
        viz.viz_sver.show_sver(self.ibs, self.qaid, aid2=aid, fnum=fnum)
        if dodraw:
            #self.draw()
            pt.draw()
    def show_coverage(self, dodraw=True):
        """
        Show the query coverage mask for this chip match.

        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-show_coverage --show
            python -m ibeis.viz.interact.interact_matches --test-show_coverage
        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import * # NOQA
            >>> self = testdata_match_interact(mx=1)
            >>> self.show_coverage(dodraw=False)
            >>> pt.show_if_requested()
        """
        masks_list = scoring.get_masks(self.qreq_, self.cm)
        scoring.show_coverage_mask(self.qreq_, self.cm, masks_list)
        if dodraw:
            #self.draw()
            pt.draw()
    def show_each_chip(self):
        """Show the query and database chips in separate figures."""
        viz_chip.show_chip(self.ibs, self.qaid, fnum=pt.next_fnum(), nokpts=True)
        viz_chip.show_chip(self.ibs, self.daid, fnum=pt.next_fnum(), nokpts=True)
        pt.draw()
        #self.draw()
    def show_each_fgweight_chip(self):
        """Show both chips weighted by foreground weights."""
        viz_chip.show_chip(self.ibs, self.qaid, fnum=pt.next_fnum(),
                           weight_label='fg_weights')
        viz_chip.show_chip(self.ibs, self.daid, fnum=pt.next_fnum(),
                           weight_label='fg_weights')
        #self.draw()
        pt.draw()
    def show_each_dstncvs_chip(self, dodraw=True):
        """
        Show both chips weighted by keypoint distinctiveness.

        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-show_each_dstncvs_chip --show
        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import * # NOQA
            >>> self = testdata_match_interact(mx=1)
            >>> self.show_each_dstncvs_chip(dodraw=False)
            >>> pt.show_if_requested()
        """
        dstncvs1, dstncvs2 = scoring.get_kpts_distinctiveness(self.ibs,
                                                              [self.qaid,
                                                               self.daid])
        print('dstncvs1_stats = ' + ut.get_stats_str(dstncvs1))
        print('dstncvs2_stats = ' + ut.get_stats_str(dstncvs2))
        weight_label = 'dstncvs'
        showkw = dict(weight_label=weight_label, ell=False, pts=True)
        viz_chip.show_chip(self.ibs, self.qaid, weights=dstncvs1,
                           fnum=pt.next_fnum(), **showkw)
        viz_chip.show_chip(self.ibs, self.daid, weights=dstncvs2,
                           fnum=pt.next_fnum(), **showkw)
        if dodraw:
            #self.draw()
            pt.draw()
    def show_each_probchip(self):
        """Show the probability chips of both annotations."""
        viz_hough.show_probability_chip(self.ibs, self.qaid, fnum=pt.next_fnum())
        viz_hough.show_probability_chip(self.ibs, self.daid, fnum=pt.next_fnum())
        pt.draw()
        #self.draw()
    def dev_reload(self):
        # Developer helper: hot-reload the class and re-hook callbacks.
        ih.disconnect_callback(self.fig, 'button_press_event')
        self.rrr()
        self.set_callbacks()
    def dev_embed(self):
        # Developer helper: drop into an interactive shell.
        ut.embed()
    def toggle_samefig(self):
        # Toggle drawing both chips in the same figure and redraw.
        self.same_fig = not self.same_fig
        if self.mx is not None:
            self.select_ith_match(self.mx)
        self.draw()
    def query_last_feature(self):
        # Show nearest descriptors for the last selected query feature.
        ibs = self.ibs
        qaid = self.qaid
        viz.show_nearest_descriptors(ibs, qaid, self.last_fx, pt.next_fnum(),
                                     qreq_=self.qreq_, draw_chip=True)
        fig3 = pt.gcf()
        ih.connect_callback(fig3, 'button_press_event', self.on_click)
        pt.draw()
    def get_popup_options(self):
        """Build the right-click context-menu entries for this view."""
        from ibeis.gui import inspect_gui
        options = []
        ax = pt.gca()  # HACK
        from plottool import plot_helpers as ph
        viztype = ph.get_plotdat(ax, 'viztype', '')
        is_match_type = viztype in ['matches', 'multi_match']
        if is_match_type:
            options += inspect_gui.get_aidpair_context_menu_options(
                self.ibs, self.qaid, self.daid, self.cm,
                qreq_=self.qreq_,
                #update_callback=self.show_page,
                #backend_callback=None, aid_list=aid_list)
            )
        options += [
            #('Toggle same_fig', self.toggle_samefig),
            #('Toggle vert', self.toggle_vert),
            ('query last feature', self.query_last_feature),
            ('show each chip', self.show_each_chip),
            ('show each distinctiveness chip', self.show_each_dstncvs_chip),
            ('show each foreground weight chip', self.show_each_fgweight_chip),
            ('show each probchip', self.show_each_probchip),
            ('show coverage', self.show_coverage),
            #('show each probchip', self.query_last_feature),
        ]
        #options.append(('name_interaction', self.name_interaction))
        #if self.H1 is not None:
        #    options.append(('Toggle homog', self.toggle_homog))
        if ut.is_developer():
            options.append(('dev_reload', self.dev_reload))
            options.append(('dev_embed', self.dev_embed))
        #options.append(('cancel', lambda: print('cancel')))
        options += super(MatchInteraction, self).get_popup_options()
        return options
        #self.show_popup_menu(options, event)
    # Callback
    def on_click_inside(self, event, ax):
        """Dispatch a click by axis type: right-click and plain clicks
        defer to the superclass; ctrl-click on a match axis opens the
        spatial-verification view; clicks on a feature patch open a
        chip/gradient-orientation view for that feature."""
        from plottool import plot_helpers as ph
        ibs = self.ibs
        viztype = ph.get_plotdat(ax, 'viztype', '')
        is_match_type = viztype in ['matches', 'multi_match']
        key = '' if event.key is None else event.key
        print('key=%r ' % key)
        ctrl_down = key.find('control') == 0
        # Click in match axes
        if event.button == 3:
            # Right click: superclass shows the popup menu.
            return super(MatchInteraction, self).on_click_inside(event, ax)
        if is_match_type and ctrl_down:
            # Ctrl-Click
            print('.. control click')
            return self.sv_view()
        elif viztype in ['warped', 'unwarped']:
            print('clicked at patch')
            ut.print_dict(ph.get_plotdat_dict(ax))
            hs_aid = {
                'aid1': self.qaid,
                'aid2': self.daid,
            }[vh.get_ibsdat(ax, 'aid', None)]
            hs_fx = vh.get_ibsdat(ax, 'fx', None)
            print('hs_fx = %r' % (hs_fx,))
            print('hs_aid = %r' % (hs_aid,))
            if hs_aid is not None and viztype == 'unwarped':
                ishow_chip(ibs, hs_aid, fx=hs_fx, fnum=pt.next_fnum())
            elif hs_aid is not None and viztype == 'warped':
                viz.show_keypoint_gradient_orientations(ibs, hs_aid, hs_fx,
                                                        fnum=pt.next_fnum())
        else:
            return super(MatchInteraction, self).on_click_inside(event, ax)
        self.draw()
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.viz.interact.interact_matches
        python -m ibeis.viz.interact.interact_matches --allexamples
        python -m ibeis.viz.interact.interact_matches --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # Run this module's doctests via utool's doctest runner.
    ut.doctest_funcs()
|
{
"content_hash": "40380c148b28caab074d07f5aee8de87",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 114,
"avg_line_length": 37.938337801608576,
"alnum_prop": 0.5533177867288531,
"repo_name": "SU-ECE-17-7/ibeis",
"id": "7923c644fe8fc277146b8d6711ed96a300478532",
"size": "14175",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "ibeis/viz/interact/interact_matches.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "331"
},
{
"name": "CSS",
"bytes": "26792"
},
{
"name": "HTML",
"bytes": "33762203"
},
{
"name": "Inno Setup",
"bytes": "1585"
},
{
"name": "JavaScript",
"bytes": "227454"
},
{
"name": "Jupyter Notebook",
"bytes": "66346367"
},
{
"name": "Python",
"bytes": "6112508"
},
{
"name": "Shell",
"bytes": "58211"
}
],
"symlink_target": ""
}
|
"""EC-3PO EC Interpreter
interpreter provides the interpretation layer between the EC UART and the user.
It receives commands through its command pipe, formats the commands for the EC,
and sends the command to the EC. It also presents data from the EC to either be
displayed via the interactive console interface, or some other consumer. It
additionally supports automatic command retrying if the EC drops a character in
a command.
"""
from __future__ import print_function
import binascii
# pylint: disable=cros-logging-import
import logging
import os
import Queue
import select
import sys
# Constants for the EC console protocol.
COMMAND_RETRIES = 3  # Number of attempts to retry a command.
EC_MAX_READ = 1024  # Max bytes to read at a time from the EC.
EC_SYN = '\xec'  # Byte indicating EC interrogation.
EC_ACK = '\xc0'  # Byte representing correct EC response to interrogation.
class LoggerAdapter(logging.LoggerAdapter):
  """Small adapter which tags every log message with the served PTY."""

  def process(self, msg, kwargs):
    """Prefix the served PTY onto msg; pass kwargs through unchanged."""
    tagged_msg = '%s - %s' % (self.extra['pty'], msg)
    return tagged_msg, kwargs
class Interpreter(object):
  """Class which provides the interpretation layer between the EC and user.
  This class essentially performs all of the interpretation for the EC and the
  user. It handles all of the automatic command retrying as well as the
  formation of commands for EC images which support that.
  Attributes:
    logger: A logger for this module.
    ec_uart_pty: An opened file object to the raw EC UART PTY.
    ec_uart_pty_name: A string containing the name of the raw EC UART PTY.
    cmd_pipe: A multiprocessing.Connection object which represents the
      Interpreter side of the command pipe. This must be a bidirectional pipe.
      Commands and responses will utilize this pipe.
    dbg_pipe: A multiprocessing.Connection object which represents the
      Interpreter side of the debug pipe. This must be a unidirectional pipe
      with write capabilities. EC debug output will utilize this pipe.
    cmd_retries: An integer representing the number of attempts the console
      should retry commands if it receives an error.
    log_level: An integer representing the numeric value of the log level.
    inputs: A list of objects that the interpreter selects for reading.
      Initially, these are the EC UART and the command pipe.
    outputs: A list of objects that the interpreter selects for writing.
    ec_cmd_queue: A FIFO queue used for sending commands down to the EC UART.
    last_cmd: A string that represents the last command sent to the EC. If an
      error is encountered, the interpreter will attempt to retry this command
      up to COMMAND_RETRIES.
    enhanced_ec: A boolean indicating if the EC image that we are currently
      communicating with is enhanced or not. Enhanced EC images will support
      packed commands and host commands over the UART. This defaults to False
      and is changed depending on the result of an interrogation.
    interrogating: A boolean indicating if we are in the middle of interrogating
      the EC.
    connected: A boolean indicating if the interpreter is actually connected to
      the UART and listening.
  """
  def __init__(self, ec_uart_pty, cmd_pipe, dbg_pipe, log_level=logging.INFO):
    """Initializes an Interpreter object with the provided args.
    Args:
      ec_uart_pty: A string representing the EC UART to connect to.
      cmd_pipe: A multiprocessing.Connection object which represents the
        Interpreter side of the command pipe. This must be a bidirectional
        pipe. Commands and responses will utilize this pipe.
      dbg_pipe: A multiprocessing.Connection object which represents the
        Interpreter side of the debug pipe. This must be a unidirectional pipe
        with write capabilities. EC debug output will utilize this pipe.
      log_level: An optional integer representing the numeric value of the log
        level. By default, the log level will be logging.INFO (20).
    """
    logger = logging.getLogger('EC3PO.Interpreter')
    # Every log line is tagged with the PTY so multiple interpreters can
    # share one log.
    self.logger = LoggerAdapter(logger, {'pty': ec_uart_pty})
    # 'a+' yields a single descriptor that can both read and append.
    self.ec_uart_pty = open(ec_uart_pty, 'a+')
    self.ec_uart_pty_name = ec_uart_pty
    self.cmd_pipe = cmd_pipe
    self.dbg_pipe = dbg_pipe
    self.cmd_retries = COMMAND_RETRIES
    self.log_level = log_level
    self.inputs = [self.ec_uart_pty, self.cmd_pipe]
    self.outputs = []
    self.ec_cmd_queue = Queue.Queue()
    self.last_cmd = ''
    self.enhanced_ec = False
    self.interrogating = False
    self.connected = True
  def __str__(self):
    """Show internal state of the Interpreter object.
    Returns:
      A string that shows the values of the attributes.
    """
    # NOTE(review): the 'connected' attribute is not reported here; consider
    # whether it should be included.
    string = []
    string.append('%r' % self)
    string.append('ec_uart_pty: %s' % self.ec_uart_pty)
    string.append('cmd_pipe: %r' % self.cmd_pipe)
    string.append('dbg_pipe: %r' % self.dbg_pipe)
    string.append('cmd_retries: %d' % self.cmd_retries)
    string.append('log_level: %d' % self.log_level)
    string.append('inputs: %r' % self.inputs)
    string.append('outputs: %r' % self.outputs)
    string.append('ec_cmd_queue: %r' % self.ec_cmd_queue)
    string.append('last_cmd: \'%s\'' % self.last_cmd)
    string.append('enhanced_ec: %r' % self.enhanced_ec)
    string.append('interrogating: %r' % self.interrogating)
    return '\n'.join(string)
  def EnqueueCmd(self, command):
    """Enqueue a command to be sent to the EC UART.
    Args:
      command: A string which contains the command to be sent.
    """
    self.ec_cmd_queue.put(command)
    self.logger.debug('Commands now in queue: %d', self.ec_cmd_queue.qsize())
    # Add the EC UART as an output to be serviced.
    if self.connected and self.ec_uart_pty not in self.outputs:
      self.outputs.append(self.ec_uart_pty)
  def PackCommand(self, raw_cmd):
    r"""Packs a command for use with error checking.
    For error checking, we pack console commands in a particular format. The
    format is as follows:
      &&[x][x][x][x]&{cmd}\n\n
      ^ ^ ^^ ^^ ^ ^-- 2 newlines.
      | | || || |-- the raw console command.
      | | || ||-- 1 ampersand.
      | | ||____|--- 2 hex digits representing the CRC8 of cmd.
      | |____|-- 2 hex digits representing the length of cmd.
      |-- 2 ampersands
    Args:
      raw_cmd: A pre-packed string which contains the raw command.
    Returns:
      A string which contains the packed command.
    """
    # Don't pack a single carriage return.
    if raw_cmd != '\r':
      # The command format is as follows.
      # &&[x][x][x][x]&{cmd}\n\n
      packed_cmd = []
      packed_cmd.append('&&')
      # The first pair of hex digits are the length of the command.
      packed_cmd.append('%02x' % len(raw_cmd))
      # Then the CRC8 of cmd.
      packed_cmd.append('%02x' % Crc8(raw_cmd))
      packed_cmd.append('&')
      # Now, the raw command followed by 2 newlines.
      packed_cmd.append(raw_cmd)
      packed_cmd.append('\n\n')
      return ''.join(packed_cmd)
    else:
      return raw_cmd
  def ProcessCommand(self, command):
    """Captures the input and determines what actions to take.
    Args:
      command: A string representing the command sent by the user.
    """
    if command == "disconnect":
      if self.connected:
        self.logger.debug('UART disconnect request.')
        # Drop all pending commands if any.
        while not self.ec_cmd_queue.empty():
          c = self.ec_cmd_queue.get()
          self.logger.debug('dropped: \'%s\'', c)
        if self.enhanced_ec:
          # Reset retry state.
          self.cmd_retries = COMMAND_RETRIES
          self.last_cmd = ''
        # Get the UART that the interpreter is attached to.
        fd = self.ec_uart_pty
        self.logger.debug('fd: %r', fd)
        # Remove the descriptor from the inputs and outputs.
        self.inputs.remove(fd)
        if fd in self.outputs:
          self.outputs.remove(fd)
        self.logger.debug('Removed fd. Remaining inputs: %r', self.inputs)
        # Close the file.
        fd.close()
        # Mark the interpreter as disconnected now.
        self.connected = False
        self.logger.debug('Disconnected from %s.', self.ec_uart_pty_name)
      return
    elif command == "reconnect":
      if not self.connected:
        self.logger.debug('UART reconnect request.')
        # Reopen the PTY.
        fd = open(self.ec_uart_pty_name, 'a+')
        self.logger.debug('fd: %r', fd)
        self.ec_uart_pty = fd
        # Add the descriptor to the inputs.
        self.inputs.append(fd)
        self.logger.debug('fd added. curr inputs: %r', self.inputs)
        # Mark the interpreter as connected now.
        self.connected = True
        self.logger.debug('Connected to %s.', self.ec_uart_pty_name)
      return
    # Ignore any other commands while in the disconnected state.
    self.logger.debug('command: \'%s\'', command)
    if not self.connected:
      self.logger.debug('Ignoring command because currently disconnected.')
      return
    # Remove leading and trailing spaces only if this is an enhanced EC image.
    # For non-enhanced EC images, commands will be single characters at a time
    # and can be spaces.
    if self.enhanced_ec:
      command = command.strip(' ')
    # There's nothing to do if the command is empty.
    if len(command) == 0:
      return
    # Check for interrogation command.
    if command == EC_SYN:
      # User is requesting interrogation. Send SYN as is.
      self.logger.debug('User requesting interrogation.')
      self.interrogating = True
      # Assume the EC isn't enhanced until we get a response.
      self.enhanced_ec = False
    elif self.enhanced_ec:
      # Enhanced EC images require the plaintext commands to be packed.
      command = self.PackCommand(command)
      # TODO(aaboagye): Make a dict of commands and keys and eventually,
      # handle partial matching based on unique prefixes.
    self.EnqueueCmd(command)
  def HandleCmdRetries(self):
    """Attempts to retry commands if possible."""
    if self.cmd_retries > 0:
      # The EC encountered an error. We'll have to retry again.
      self.logger.warning('Retrying command...')
      self.cmd_retries -= 1
      self.logger.warning('Retries remaining: %d', self.cmd_retries)
      # Retry the command and add the EC UART to the writers again.
      self.EnqueueCmd(self.last_cmd)
      self.outputs.append(self.ec_uart_pty)
    else:
      # We're out of retries, so just give up.
      self.logger.error('Command failed. No retries left.')
      # Clear the command in progress.
      self.last_cmd = ''
      # Reset the retry count.
      self.cmd_retries = COMMAND_RETRIES
  def SendCmdToEC(self):
    """Sends a command to the EC."""
    # If we're retrying a command, just try to send it again.
    if self.cmd_retries < COMMAND_RETRIES:
      cmd = self.last_cmd
    else:
      # If we're not retrying, we should not be writing to the EC if we have no
      # items in our command queue.
      assert not self.ec_cmd_queue.empty()
      # Get the command to send.
      cmd = self.ec_cmd_queue.get()
    # Send the command.
    self.ec_uart_pty.write(cmd)
    self.ec_uart_pty.flush()
    self.logger.debug('Sent command to EC.')
    if self.enhanced_ec and cmd != EC_SYN:
      # Now, that we've sent the command, store the current command as the last
      # command sent. If we encounter an error string, we will attempt to retry
      # this command.
      if cmd != self.last_cmd:
        self.last_cmd = cmd
        # Reset the retry count.
        self.cmd_retries = COMMAND_RETRIES
    # If no command is pending to be sent, then we can remove the EC UART from
    # writers. Might need better checking for command retry logic in here.
    if self.ec_cmd_queue.empty():
      # Remove the EC UART from the writers while we wait for a response.
      self.logger.debug('Removing EC UART from writers.')
      self.outputs.remove(self.ec_uart_pty)
  def HandleECData(self):
    """Handle any debug prints from the EC."""
    self.logger.debug('EC has data')
    # Read what the EC sent us.
    data = os.read(self.ec_uart_pty.fileno(), EC_MAX_READ)
    self.logger.debug('got: \'%s\'', binascii.hexlify(data))
    # '&E' is a substring of the full '&&EE' error marker; a substring test is
    # used because the leading '&' or trailing 'E' may be dropped in transit.
    if '&E' in data and self.enhanced_ec:
      # We received an error, so we should retry it if possible.
      self.logger.warning('Error string found in data.')
      self.HandleCmdRetries()
      return
    # If we were interrogating, check the response and update our knowledge
    # of the current EC image.
    if self.interrogating:
      self.enhanced_ec = data == EC_ACK
      if self.enhanced_ec:
        self.logger.debug('The current EC image seems enhanced.')
      else:
        self.logger.debug('The current EC image does NOT seem enhanced.')
      # Done interrogating.
      self.interrogating = False
    # For now, just forward everything the EC sends us.
    self.logger.debug('Forwarding to user...')
    self.dbg_pipe.send(data)
  def HandleUserData(self):
    """Handle any incoming commands from the user."""
    self.logger.debug('Command data available. Begin processing.')
    data = self.cmd_pipe.recv()
    # Process the command.
    self.ProcessCommand(data)
def Crc8(data):
  """Compute the CRC8 checksum of a string of data.

  Uses the generator polynomial x^8 + x^2 + x + 1 and mirrors the EC-side
  implementation so both ends of the console link agree on checksums.

  Args:
    data: A string of data that we wish to calculate the CRC8 on.

  Returns:
    An integer containing the 8-bit CRC value.
  """
  # The polynomial is pre-shifted so it lines up with bit 15 of the 16-bit
  # working register.
  poly = 0x1070 << 3
  remainder = 0
  for char in data:
    remainder ^= (ord(char) << 8)
    for _ in range(8):
      if remainder & 0x8000:
        remainder = (remainder ^ poly) << 1
      else:
        remainder <<= 1
  # The checksum lives in the upper byte of the register.
  return remainder >> 8
def StartLoop(interp):
  """Service the user and the EC forever, relaying data between them.

  Commands from the user are forwarded to the EC, and EC output is forwarded
  back to the user.  Each command is sent once; if the EC reports a receive
  error (a string containing '&' and 'E' -- nominally '&&EE', though the
  leading ampersand or trailing 'E' may be dropped), the interpreter retries
  the command up to its configured retry count.

  Args:
    interp: An Interpreter object that has been properly initialised.
  """
  try:
    while True:
      # Block until at least one registered stream needs service.
      readable, writeable, _ = select.select(interp.inputs, interp.outputs, [])
      if interp.ec_uart_pty in readable:
        # Debug prints (and command responses) coming from the EC.
        interp.HandleECData()
      if interp.cmd_pipe in readable:
        # Commands coming from the user.
        interp.HandleUserData()
      if interp.ec_uart_pty in writeable:
        # The EC UART can accept a pending command.
        interp.SendCmdToEC()
  finally:
    # Tear everything down on the way out, whatever the cause.
    # Close pipes.
    interp.cmd_pipe.close()
    interp.dbg_pipe.close()
    # Close file descriptor.
    interp.ec_uart_pty.close()
    # Exit.
    sys.exit(0)
|
{
"content_hash": "d0376593798d11bc1afe6b9b243b70a3",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 80,
"avg_line_length": 38.191542288557216,
"alnum_prop": 0.6631277274799714,
"repo_name": "akappy7/ChromeOS_EC_LED_Diagnostics",
"id": "23e896c640f86f5a3e69d92966555287bb296fe5",
"size": "15519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/ec3po/interpreter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "87531"
},
{
"name": "C",
"bytes": "6849251"
},
{
"name": "C++",
"bytes": "152075"
},
{
"name": "Makefile",
"bytes": "67499"
},
{
"name": "Objective-C",
"bytes": "722"
},
{
"name": "Perl",
"bytes": "1594"
},
{
"name": "Python",
"bytes": "167796"
},
{
"name": "Shell",
"bytes": "16964"
},
{
"name": "Tcl",
"bytes": "4438"
}
],
"symlink_target": ""
}
|
"""The influence of windowing of log. bandlimited sweep signals when using a
Kaiser Window by fixing beta (=2) and fade_out (=0).
fstart = 100 Hz
fstop = 5000 Hz
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import ir_imitation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter, fftconvolve
import numpy as np
# Parameters of the measuring system
fs = 44100  # sampling rate
fstart = 100  # sweep start frequency in Hz
fstop = 5000  # sweep stop frequency in Hz
duration = 1  # sweep duration
pad = 4  # zero padding after the sweep, in seconds (pad * fs samples)
# Generate excitation signal
excitation = generation.log_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
noise_level_db = -30
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System
dirac_system = measurement_chain.convolution([1.0])
# Combine system elements
system = measurement_chain.chained(dirac_system, noise)
# Lists
beta = 7  # fixed Kaiser window shape parameter for this experiment
fade_in_list = np.arange(0, 1001, 1)  # fade-in lengths to sweep over
fade_out = 0  # fade-out is held at zero
# Spectrum of dirac for reference
dirac = np.zeros(pad * fs)
dirac[0] = 1
dirac_f = np.fft.rfft(dirac)
def get_results(fade_in):
    """Measure the impulse response for a single fade-in length.

    The excitation sweep is windowed (Kaiser window using the module-level
    beta/fade_out settings), zero-padded, passed through the simulated
    measurement chain, and deconvolved back into an impulse response.
    """
    window = windows.window_kaiser(N, fade_in, fade_out, fs, beta)
    padded_sweep = generation.zero_padding(excitation * window, pad, fs)
    response = system(padded_sweep)
    return calculation.deconv_process(padded_sweep, response, fs)
with open("log_sweep_kaiser_window_bandlimited_script5.txt", "w") as f:
for fade_in in fade_in_list:
ir = get_results(fade_in)
pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
spectrum_distance = calculation.vector_distance(
dirac_f, np.fft.rfft(ir[:pad * fs]))
f.write(
str(fade_in) + " " + str(pnr) +
" " + str(spectrum_distance) + " \n")
|
{
"content_hash": "8823fe60b5ab3438bca4b38bb355a699",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 76,
"avg_line_length": 24.50574712643678,
"alnum_prop": 0.6111632270168855,
"repo_name": "spatialaudio/sweep",
"id": "f08ef1568020e669eea3cf187fc1e4bcc22de873",
"size": "2156",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "log_sweep_kaiser_window_bandlimited_script5/log_sweep_kaiser_window_bandlimited_script5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101013"
}
],
"symlink_target": ""
}
|
"""Domain objects related to Oppia improvement tasks."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import user_services
from core.platform import models
import python_utils
import utils
# Load the improvements storage models through the platform models registry.
(improvements_models,) = (
    models.Registry.import_models([models.NAMES.improvements]))
class TaskEntry(python_utils.OBJECT):
    """Domain object representing an actionable improvements-tab task.

    Attributes:
        entity_type: str. The type of entity the task entry refers to
            (for example, "exploration").
        entity_id: str. The ID of that entity (for example, an exploration
            ID).
        entity_version: int. The version of that entity.
        task_type: str. The type of task the task entry tracks.
        target_type: str. The type of sub-entity targeted (for example,
            "state" when entity type is "exploration").
        target_id: str. The ID of that sub-entity (for example, a state
            name).
        issue_description: str or None. The sentence generated by Oppia to
            describe why the task was created.
        status: str. Tracks the state/progress of the task entry.
        resolver_id: str or None. The user who resolved this task, if any.
        resolved_on: datetime or None. When this task was resolved, if ever.
    """

    def __init__(
            self, entity_type, entity_id, entity_version, task_type,
            target_type, target_id, issue_description, status, resolver_id,
            resolved_on):
        """Initializes a new TaskEntry domain object from the given values.

        Args:
            entity_type: str. The type of entity the task entry refers to.
            entity_id: str. The ID of that entity.
            entity_version: int. The version of that entity.
            task_type: str. The type of task the task entry tracks.
            target_type: str. The type of sub-entity targeted.
            target_id: str. The ID of that sub-entity.
            issue_description: str. The sentence generated by Oppia to
                describe why the task was created.
            status: str. Tracks the state/progress of the task entry.
            resolver_id: str. The user who resolved this task. Stored as
                None unless status is resolved.
            resolved_on: datetime. When this task was resolved. Stored as
                None unless status is resolved.
        """
        # Resolution details are only meaningful for resolved tasks.
        is_resolved = status == improvements_models.TASK_STATUS_RESOLVED
        self.entity_type = entity_type
        self.entity_id = entity_id
        self.entity_version = entity_version
        self.task_type = task_type
        self.target_type = target_type
        self.target_id = target_id
        self.issue_description = issue_description
        self.status = status
        self.resolver_id = resolver_id if is_resolved else None
        self.resolved_on = resolved_on if is_resolved else None

    @property
    def task_id(self):
        """Returns the unique identifier of this task.

        The value has the form: "[entity_type].[entity_id].[entity_version].
        [task_type].[target_type].[target_id]".

        Returns:
            str. The ID of this task.
        """
        return improvements_models.TaskEntryModel.generate_task_id(
            self.entity_type, self.entity_id, self.entity_version,
            self.task_type, self.target_type, self.target_id)

    @property
    def composite_entity_id(self):
        """Returns the utility field identifying this task's entity-version.

        The value has the form: "[entity_type].[entity_id].[entity_version]".
        Querying by this single field is measurably faster (about 20%) than
        querying by the individual fields that compose it.

        Returns:
            str. The value of the utility field.
        """
        return improvements_models.TaskEntryModel.generate_composite_entity_id(
            self.entity_type, self.entity_id, self.entity_version)

    def to_dict(self):
        """Returns a dict-representation of the task.

        Returns:
            dict. Contains the keys: entity_type, entity_id, entity_version,
            task_type, target_type, target_id, issue_description, status,
            resolver_username, resolver_profile_picture_data_url and
            resolved_on_msecs. The resolver_* and resolved_on_msecs values
            are None unless the task has been resolved.
        """
        # The short-circuiting `and`s below intentionally leave falsy
        # resolver_id/resolved_on values untouched in the output.
        user_settings = (
            self.resolver_id and
            user_services.get_user_settings(self.resolver_id, strict=True))
        task_dict = {
            'entity_type': self.entity_type,
            'entity_id': self.entity_id,
            'entity_version': self.entity_version,
            'task_type': self.task_type,
            'target_type': self.target_type,
            'target_id': self.target_id,
            'issue_description': self.issue_description,
            'status': self.status,
        }
        task_dict['resolver_username'] = (
            user_settings and user_settings.username)
        task_dict['resolver_profile_picture_data_url'] = (
            user_settings and user_settings.profile_picture_data_url)
        task_dict['resolved_on_msecs'] = (
            self.resolved_on and
            utils.get_time_in_millisecs(self.resolved_on))
        return task_dict
|
{
"content_hash": "95b9bd8dfdd65863af9bc81a9665c799",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 80,
"avg_line_length": 47.37908496732026,
"alnum_prop": 0.6116705752517588,
"repo_name": "prasanna08/oppia",
"id": "83ff15d9243ee7de817124b19a00ce80762bb2ef",
"size": "7872",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/improvements_domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "97795"
},
{
"name": "HTML",
"bytes": "1128491"
},
{
"name": "JavaScript",
"bytes": "733121"
},
{
"name": "Python",
"bytes": "9362251"
},
{
"name": "Shell",
"bytes": "10639"
},
{
"name": "TypeScript",
"bytes": "6077851"
}
],
"symlink_target": ""
}
|
"""
This package contains objects used by :class:`.Node`\ s, but that are not nodes
themselves. This includes template parameters and HTML tag attributes.
"""
from .attribute import Attribute
from .parameter import Parameter
|
{
"content_hash": "cb61f59e36b1e5663068f466f7d2b76c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 79,
"avg_line_length": 32.285714285714285,
"alnum_prop": 0.7787610619469026,
"repo_name": "hperala/kontuwikibot",
"id": "2d90b4e07fc94f7b2f54ee4e53cf48c17178a8e7",
"size": "1375",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mwparserfromhell/nodes/extras/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "C",
"bytes": "137889"
},
{
"name": "C++",
"bytes": "4113"
},
{
"name": "Python",
"bytes": "3758566"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
# NOTE: must be set before Sphinx loads the bokeh extensions listed below,
# so the docs build even when no Google API key is configured.
os.environ['BOKEH_DOCS_MISSING_API_KEY_OK'] = 'yes'
# Pull the release version from the installed package; fall back gracefully
# so the docs can still build when the package is not importable.
try:
    import limix_genetics
    version = limix_genetics.__version__
except ImportError:
    version = 'unknown'
needs_sphinx = '1.5'
# Sphinx extension modules (stdlib extensions plus the bokeh plot directives).
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.doctest',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'bokeh.sphinxext.bokeh_autodoc',
    'bokeh.sphinxext.bokeh_plot',
]
napoleon_google_docstring = True
# -- General configuration ------------------------------------------------
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'limix-genetics'
copyright = '2016, Danilo Horta'
author = 'Danilo Horta'
release = version
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'conf.py']
pygments_style = 'sphinx'
todo_include_todos = False
# -- Options for HTML / LaTeX / man / Texinfo output ----------------------
html_theme = 'default'
htmlhelp_basename = 'limix-geneticsdoc'
latex_elements = {}
latex_documents = [
    (master_doc, 'limix-genetics.tex', 'limix-genetics Documentation',
     'Danilo Horta', 'manual'),
]
man_pages = [
    (master_doc, 'limix-genetics', 'limix-genetics Documentation',
     [author], 1)
]
texinfo_documents = [
    (master_doc, 'limix-genetics', 'limix-genetics Documentation',
     author, 'limix-genetics', 'One line description of project.',
     'Miscellaneous'),
]
# Cross-reference targets for :external: links into Python and NumPy docs.
intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None)
}
|
{
"content_hash": "4d6eb1428c82216bbf6acd5d49adac15",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 70,
"avg_line_length": 26.574074074074073,
"alnum_prop": 0.6731707317073171,
"repo_name": "glimix/limix-genetics",
"id": "aa705701ac89826cb33a70e1c17ec102ce2eee92",
"size": "1460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15065"
},
{
"name": "Shell",
"bytes": "689"
}
],
"symlink_target": ""
}
|
def extractDarkroadtalesBlogspotCom(item):
    '''
    Parser for 'darkroadtales.blogspot.com'
    '''
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Skip items with no chapter/volume information, and skip preview posts.
    if not (chp or vol) or 'preview' in title.lower():
        return None
    # (tag to look for, series name to report, translation type)
    known_tags = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in known_tags:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(
                item, name, vol, chp, frag=frag, postfix=postfix,
                tl_type=tl_type)
    return False
|
{
"content_hash": "709ee1ecdb6244ff2b95df366d1540bd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.80952380952381,
"alnum_prop": 0.6376554174067496,
"repo_name": "fake-name/ReadableWebProxy",
"id": "2dd54d5c95dc7f647d40049b183b95d73a2f37ed",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractDarkroadtalesBlogspotCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
# NOTE(review): multiprocessing is imported but unused -- presumably the
# usual workaround for a nose/atexit crash when running `setup.py test`;
# confirm before removing.
import multiprocessing
from setuptools import setup
# Package metadata for serf_master: helpers for writing manageable Serf
# event handlers.
setup(name='serf_master',
      version='0.4',
      description='helpers for writing manageable Serf handlers',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Topic :: System :: Systems Administration',
      ],
      keywords='serf',
      url='http://github.com/garethr/serf-master',
      author='Gareth Rushgrove',
      author_email='gareth@morethanseven.net',
      license='MIT',
      packages=['serf_master'],
      include_package_data=True,
      test_suite='nose.collector',
      # Dependencies needed only when running the test suite.
      tests_require=[
          'nose',
          'mock',
          'coverage',
      ],
      zip_safe=False)
|
{
"content_hash": "f419b5ac9aa4dcec271f3735ca8336b5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 63,
"avg_line_length": 28.62962962962963,
"alnum_prop": 0.6170763260025873,
"repo_name": "garethr/serf-master",
"id": "047403ed2e2b2c34b5c2c32d1e4963ecef2539d4",
"size": "773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6313"
}
],
"symlink_target": ""
}
|
"""
Unittests related to the mechanism module.
"""
import pytest
from ctypes import cast, c_ulong, c_ubyte
from mock import patch
from six import integer_types
from pycryptoki.conversions import from_hex, to_bytestring
from pycryptoki.cryptoki import (
CK_RSA_PKCS_PSS_PARAMS,
POINTER,
CK_ULONG,
CK_AES_GCM_PARAMS,
CK_MECHANISM,
)
from pycryptoki.defines import *
from pycryptoki.mechanism import (
Mechanism,
MechanismException,
AutoMech,
MECH_LOOKUP,
AESGCMMechanism,
NullMech,
EDDSAMechanism,
)
# Per-mechanism keyword parameters used to build valid Mechanism objects in
# the tests below.  Each entry also carries a 'test_id' key which idfn() pops
# out at collection time for use as the pytest parametrize ID (so it is NOT
# passed on as a mechanism parameter).
MECH_PARAMS = {
    CKM_AES_XTS: {"hTweakKey": 0, "cb": list(range(12)), "test_id": "AES_XTS"},
    CKM_DES3_CBC: {"iv": list(range(12)), "test_id": "DES3"},
    CKM_AES_CBC: {"iv": list(range(16)), "test_id": "AES_CBC"},
    CKM_RC2_ECB: {"usEffectiveBits": 8, "test_id": "RC2_ECB"},
    CKM_RC2_CBC: {"usEffectiveBits": 8, "iv": list(range(8)), "test_id": "RC2_CBC"},
    CKM_RC5_ECB: {"ulWordsize": 8, "ulRounds": 8, "test_id": "RC5_ECB"},
    CKM_RC5_CBC: {"ulWordsize": 8, "ulRounds": 2, "iv": list(range(12)), "test_id": "RC5_CBC"},
    CKM_RSA_PKCS_OAEP: {
        "hashAlg": CKM_SHA_1,
        "mgf": CKG_MGF1_SHA1,
        "sourceData": list(range(12)),
        "test_id": "RSA_OAEP",
    },
    CKM_AES_GCM: {"iv": list(range(16)), "AAD": b"deadbeef", "ulTagBits": 32, "test_id": "AES_GCM"},
    CKM_RSA_PKCS_PSS: {"hashAlg": CKM_SHA_1, "mgf": CKG_MGF1_SHA1, "test_id": "RSA_PSS"},
}
def idfn(test):
    # Pytest ID callback for the parametrized mechanism flavors.  Note that
    # this *pops* (rather than gets) 'test_id' from the shared MECH_PARAMS
    # entry: this collection-time mutation deliberately strips the
    # bookkeeping key so the remaining dict can later be passed directly to
    # Mechanism(params=...) inside the tests.
    return MECH_PARAMS[test].pop("test_id", "unknown")
# noinspection PyArgumentList
class TestMechanisms(object):
    @pytest.mark.parametrize(
        "flavor,params",
        [
            (CKM_AES_XTS, ["hTweakKey", "cb"]),
            (CKM_RC2_ECB, ["usEffectiveBits"]),
            (CKM_RC2_CBC, ["usEffectiveBits", "iv"]),
            (CKM_RC5_ECB, ["ulWordsize", "ulRounds"]),
            (CKM_RC5_CBC, ["ulWordsize", "ulRounds", "iv"]),
            (CKM_RSA_PKCS_OAEP, ["hashAlg", "mgf"]),
        ],
        ids=["XTS", "RC2", "RC2_CBC", "RC5", "RC5_CBC", "RSA_PKCS_OAEP"],
    )
    def test_missing_params(self, flavor, params):
        """
        Test that constructing a mechanism without its required parameters
        raises MechanismException, with every missing parameter named in the
        error message.

        :param flavor: mechanism flavor under test
        :param params: parameter names expected in the exception message
        """
        with pytest.raises(MechanismException) as excinfo:
            mech = Mechanism(flavor)
        for x in params:
            assert x in str(excinfo.value)
def test_auto_mechanism_simple_vals(self):
"""
Test that a mechanism created via the 'automech' creates a mechanism as expected.
:return:
"""
# Patch the mechanism lookup so that we don't have to have an undefined
# mechanism to test the automech.
with patch.dict(MECH_LOOKUP, {}, clear=True):
pymech = AutoMech(
CKM_RSA_PKCS_PSS,
params={
"params_name": "CK_RSA_PKCS_PSS_PARAMS",
"hashAlg": CKM_SHA_1,
"mgf": CKG_MGF1_SHA1,
"usSaltLen": 8,
},
)
assert isinstance(pymech, AutoMech)
cmech = pymech.to_c_mech()
params = cast(cmech.pParameter, POINTER(CK_RSA_PKCS_PSS_PARAMS)).contents
assert params.hashAlg == CKM_SHA_1
assert params.mgf == CKG_MGF1_SHA1
assert params.usSaltLen == 8
assert isinstance(params.usSaltLen, (integer_types, CK_ULONG))
assert isinstance(params.hashAlg, (integer_types, CK_ULONG))
assert isinstance(params.mgf, (integer_types, CK_ULONG))
def test_null_mechanism_indirect_instantiation(self):
"""
Test automech by instantiating Mechanism() instead of AutoMech()
:return:
"""
# Patch the mechanism lookup so that we don't have to have an undefined
# mechanism to test the automech.
with patch.dict(MECH_LOOKUP, {}, clear=True):
pymech = Mechanism(CKM_RSA_PKCS_PSS)
assert isinstance(pymech, NullMech)
cmech = pymech.to_c_mech()
assert cmech.pParameter is None
assert cmech.usParameterLen == 0
def test_exact_mechanism_use(self):
"""
Test that directly instantiating a subclass of Mechanism works as expected.
:return:
"""
mech = AESGCMMechanism(
mech_type=CKM_AES_GCM,
params={
"AAD": to_bytestring(from_hex(b"deadbeef")),
"iv": list(range(12)),
"ulTagBits": 32,
},
)
cmech = mech.to_c_mech()
cparams = cast(cmech.pParameter, POINTER(CK_AES_GCM_PARAMS)).contents
assert cparams.ulTagBits == 32
@pytest.mark.parametrize("flavor", list(MECH_PARAMS.keys()), ids=idfn)
def test_mech_conversions(self, flavor):
"""
Test that converting each mechanism works as expected w/ valid params.
"""
params = MECH_PARAMS[flavor]
mech = Mechanism(flavor, params=params)
cmech = mech.to_c_mech()
# Would prefer to check if it's a c_void_p, but it gets transformed directly to
# an int/long depending on memory location.
assert isinstance(cmech.pParameter, (integer_types, c_ulong))
assert isinstance(cmech.usParameterLen, (integer_types, c_ulong))
assert isinstance(cmech, CK_MECHANISM)
assert cmech.mechanism == flavor
def test_default_iv_params(self):
"""
Verify passing no IV to a mech requiring an IV will use the default value.
"""
cmech = Mechanism(CKM_DES3_CBC).to_c_mech()
rawiv = cast(cmech.pParameter, POINTER(c_ubyte))
iv = [rawiv[x] for x in range(cmech.usParameterLen)]
assert iv == [0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38]
def test_default_iv6_params(self):
"""
Verify passing no IV to a mech requiring an IV will use the default value.
"""
cmech = Mechanism(CKM_AES_CBC).to_c_mech()
rawiv = cast(cmech.pParameter, POINTER(c_ubyte))
iv = [rawiv[x] for x in range(cmech.usParameterLen)]
assert iv == [1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]
@pytest.mark.parametrize(
"flavor",
[CKM_SHA256, CKM_SHA512, CKM_DSA, CKM_RSA_PKCS],
ids=["SHA256", "SHA512", "DSA", "RSA_PKCS"],
)
def test_null_mech(self, flavor):
"""
Verify creating a 'null mech' will fill out the parameter fields properly.
"""
cmech = NullMech(flavor).to_c_mech()
assert cmech.pParameter is None
assert cmech.usParameterLen == 0
def test_no_params_given_automech(self):
"""
Verify that creating an automech w/o a params_name in the dictionary
will fail.
"""
with patch.dict(MECH_LOOKUP, {}, clear=True):
with pytest.raises(MechanismException) as excinfo:
cmech = AutoMech(CKM_DES3_CBC).to_c_mech()
assert "Failed to find a suitable Ctypes Parameter" in str(excinfo.value)
@pytest.mark.parametrize(
"mech_type,params",
[
(CKM_EDDSA, None),
(CKM_EDDSA, {"phFlag": True}),
(CKM_EDDSA_NACL, None),
(CKM_EDDSA_NACL, {"phFlag": True}),
],
)
def test_eddsa_mech(self, mech_type, params):
"""Test EDDSAMechanism"""
mechanism = EDDSAMechanism(mech_type, params)
mechanism.to_c_mech()
assert mechanism.mech.mechanism == mech_type
if params:
assert mechanism.mech.pParameter
else:
assert not mechanism.mech.pParameter
|
{
"content_hash": "1ad1c98f2bbc5398918307a3073e9df0",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 100,
"avg_line_length": 35.325688073394495,
"alnum_prop": 0.5770679132580184,
"repo_name": "gemalto/pycryptoki",
"id": "ed7a59546d2a70c0c92ed4eb10afba1fc2192f09",
"size": "7701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unittests/test_mechanisms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "791480"
}
],
"symlink_target": ""
}
|
from django.utils.timezone import utc
from django.http import HttpResponse
from django.core.exceptions import ValidationError
from sis_provisioner.models.group import Group, GroupMemberGroup
from sis_provisioner.models.course import Course
from sis_provisioner.dao.group import valid_group_id
from sis_provisioner.dao.course import (
valid_canvas_course_id, valid_course_sis_id, adhoc_course_sis_id)
from sis_provisioner.exceptions import (
GroupPolicyException, CoursePolicyException)
from blti.views import BLTILaunchView, RESTDispatch
from logging import getLogger
from datetime import datetime
import json
import re
logger = getLogger(__name__)
class GroupsLaunchView(BLTILaunchView):
    """BLTI launch endpoint that renders the groups management page."""
    template_name = 'groups/main.html'
    authorized_role = 'admin'

    def get_context_data(self, **kwargs):
        blti = self.blti
        sis_course_id = blti.course_sis_id
        if sis_course_id is None:
            # No SIS id on this course; derive an ad hoc one from the
            # Canvas course id.
            sis_course_id = adhoc_course_sis_id(blti.canvas_course_id)
        return {
            'session_id': self.request.session.session_key,
            'sis_course_id': sis_course_id,
            'canvas_course_id': blti.canvas_course_id,
            'canvas_account_id': blti.canvas_account_id,
            'launch_presentation_return_url': blti.return_url,
        }
class GroupView(RESTDispatch):
    """ Exposes Group model
    GET returns Group details
    POST inserts new Group
    DELETE removes Group
    """
    authorized_role = 'admin'
    def get(self, request, *args, **kwargs):
        # Dispatch: fetch by model id when present in the URL, otherwise
        # treat the querystring as search terms.
        group_id = kwargs.get('id')
        if group_id is not None:
            return self._getGroupById(group_id)
        else:
            return self._getGroupsByQuery(request)
    def post(self, request, *args, **kwargs):
        """Create a Group for a course/role, or revive a soft-deleted one.

        Returns 400 for an active duplicate or policy/validation failure,
        403 for a group-id policy violation.
        """
        try:
            course_id, canvas_id, group_id, role = self._validate_post(request)
            group = Group.objects.get(course_id=course_id,
                                      group_id=group_id,
                                      role=role)
            if group.is_deleted:
                # Revive the soft-deleted record and reset its provisioning
                # state so it gets picked up again.
                group.is_deleted = None
                group.deleted_by = None
                group.deleted_date = None
                group.provisioned_date = None
                group.added_date = datetime.utcnow().replace(tzinfo=utc)
            else:
                # An active group already holds this role in the course.
                return self.error_response(
                    400, 'Group {} has duplicate role in course'.format(
                        group_id))
        except Group.DoesNotExist:
            # First time this group/role is added to the course.
            try:
                valid_group_id(group_id)
                group = Group(course_id=course_id,
                              group_id=group_id,
                              role=role)
            except GroupPolicyException as ex:
                logger.info('POST policy error: {}'.format(ex))
                return self.error_response(403, ex)
        except (CoursePolicyException, GroupPolicyException,
                ValidationError) as ex:
            logger.info('POST error: {}'.format(ex))
            return self.error_response(400, ex)
        # Queue for immediate provisioning, both for new and revived groups.
        group.priority = Course.PRIORITY_IMMEDIATE
        group.added_by = self.blti.user_login_id
        group.save()
        return self.json_response(group.json_data())
    def delete(self, request, *args, **kwargs):
        """Soft-delete a Group by model id and queue it for deprovisioning."""
        try:
            id = self._valid_model_id(kwargs['id'])
            group = Group.objects.get(id=id)
            group.is_deleted = True
            group.deleted_date = datetime.utcnow().replace(tzinfo=utc)
            group.priority = Course.PRIORITY_IMMEDIATE
            group.deleted_by = self.blti.user_login_id
            group.save()
            # only group use? mark member groups deleted too
            reused = Group.objects.filter(
                group_id=group.group_id, is_deleted__isnull=True).count()
            if reused == 0:
                for gmg in GroupMemberGroup.objects.filter(
                        root_group_id=group.group_id, is_deleted__isnull=True):
                    gmg.is_deleted = True
                    gmg.save()
        except ValidationError as err:
            logger.info('DELETE group error: {}'.format(err))
            return self.error_response(400, err)
        except Group.DoesNotExist:
            return self.error_response(404, 'Group not found ({})'.format(id))
        return HttpResponse('')
    def _getGroupById(self, id):
        """Return JSON for a single non-deleted Group, or 404."""
        try:
            group = Group.objects.get(id=id, is_deleted=None)
            return self.json_response(group.json_data())
        except Group.DoesNotExist:
            return self.error_response(404, 'Group id {} not found'.format(id))
    def _getGroupsByQuery(self, request):
        """Search Groups using any valid combination of supported GET params.

        Invalid/missing individual terms are silently dropped; if none
        survive validation the request is rejected with a 400.
        """
        terms = {
            'queue_id': lambda x: self._valid_model_id(x),
            'course_id': lambda x: self._valid_course_id(x),
            'group_id': lambda x: self._valid_group_id(x),
            'role': lambda x: self._valid_role(x)}
        kwargs = {}
        for key in terms:
            try:
                kwargs[key] = terms[key](request.GET.get(key))
            except (CoursePolicyException, GroupPolicyException,
                    ValidationError):
                pass
        if not len(kwargs):
            return self.error_response(400, 'Invalid search: No search terms')
        groups = []
        for group in Group.objects.find_by_search(**kwargs):
            groups.append(group.json_data())
        return self.json_response({'groups': groups})
    def _validate_post(self, request):
        """Parse the JSON POST body into
        (course_id, canvas_id, group_id, role), validating each field.

        NOTE(review): values.get('group_id') may be None, in which case
        .strip() raises AttributeError (-> 500) instead of a 400 validation
        error -- confirm whether clients can omit group_id.
        """
        values = json.loads(request.read())
        return (
            self._valid_course_id(values.get('course_id')),
            self._valid_canvas_id(values.get('canvas_id')),
            self._valid_group_id(values.get('group_id').strip()),
            self._valid_role(values.get('role'))
        )
    def _valid_course_id(self, sis_id):
        """Validate a course SIS id, creating an ad hoc Course if missing."""
        valid_course_sis_id(sis_id)
        try:
            course = Course.objects.get(course_id=sis_id)
        except Course.DoesNotExist:
            course = Course(
                course_id=sis_id,
                course_type=Course.ADHOC_TYPE,
                term_id='',
                added_date=datetime.utcnow().replace(tzinfo=utc),
                priority=Course.PRIORITY_NONE)
            course.save()
        return course.course_id
    def _valid_group_id(self, group_id):
        # Raises GroupPolicyException on policy violation.
        valid_group_id(group_id)
        return group_id.lower()
    def _valid_role(self, role):
        # Any non-empty string is accepted as a role.
        if role is not None and len(role):
            return role
        raise ValidationError("Invalid Role: {}".format(role))
    def _valid_canvas_id(self, course_id):
        # Raises CoursePolicyException on an invalid Canvas course id.
        valid_canvas_course_id(course_id)
        return course_id
    def _valid_model_id(self, model_id):
        # Model ids must be purely numeric.
        re_model_id = re.compile(r"^\d+$")
        if (re_model_id.match(str(model_id)) is None):
            raise ValidationError("Invalid ID: {}".format(model_id))
        return model_id
|
{
"content_hash": "9fb13bcfb4882c64f5ea5de1a1d2725c",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 36.61578947368421,
"alnum_prop": 0.5759666522926549,
"repo_name": "uw-it-aca/canvas-sis-provisioner",
"id": "99c4780b64b6a1f50594bb0f96debbee770e8e38",
"size": "7045",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sis_provisioner/views/groups/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "787"
},
{
"name": "HTML",
"bytes": "46627"
},
{
"name": "JavaScript",
"bytes": "97978"
},
{
"name": "Less",
"bytes": "24842"
},
{
"name": "Python",
"bytes": "512023"
},
{
"name": "Shell",
"bytes": "744"
}
],
"symlink_target": ""
}
|
from django.urls import reverse
from django.http import Http404
from django.test import TestCase, override_settings
import mock
from rest_framework.exceptions import APIException, PermissionDenied
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.routers import SimpleRouter
from rest_framework.settings import api_settings
from rest_framework.viewsets import GenericViewSet
class DummyViewSet(GenericViewSet):
    """Viewset whose list() always blows up, used to exercise the custom
    500 error handling below."""

    def list(self, *args, **kwargs):
        failure = Exception('something went wrong')
        raise failure
# Router exposing a /testexcept/ list route backed by the always-failing
# viewset; its urls are installed as ROOT_URLCONF in the test class below.
test_exception = SimpleRouter()
test_exception.register('testexcept', DummyViewSet, basename='test-exception')
@override_settings(ROOT_URLCONF=test_exception.urls)
class TestExceptionHandlerWithViewSet(TestCase):
    """End-to-end tests: an exception raised inside a viewset should produce
    the custom 500 payload and still fire the got_request_exception signal
    with the viewset as sender and the DRF Request in kwargs."""
    # The test client connects to got_request_exception, so we need to mock it
    # otherwise it would immediately re-raise the exception.
    @mock.patch('olympia.api.exceptions.got_request_exception')
    def test_view_exception(self, got_request_exception_mock):
        """With DEBUG off the client only sees 'Internal Server Error'."""
        url = reverse('test-exception-list')
        with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False, DEBUG=False):
            response = self.client.get(url)
        assert response.status_code == 500
        assert response.data == {'detail': 'Internal Server Error'}
        assert got_request_exception_mock.send.call_count == 1
        assert got_request_exception_mock.send.call_args[0][0] == DummyViewSet
        assert isinstance(
            got_request_exception_mock.send.call_args[1]['request'], Request)
    # The test client connects to got_request_exception, so we need to mock it
    # otherwise it would immediately re-raise the exception.
    @mock.patch('olympia.api.exceptions.got_request_exception')
    def test_view_exception_debug(self, got_request_exception_mock):
        """With DEBUG on the payload additionally carries a traceback."""
        url = reverse('test-exception-list')
        with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False, DEBUG=True):
            response = self.client.get(url)
        assert response.status_code == 500
        data = response.data
        assert set(data.keys()) == set(['detail', 'traceback'])
        assert data['detail'] == 'Internal Server Error'
        assert 'Traceback (most recent call last):' in data['traceback']
        assert got_request_exception_mock.send.call_count == 1
        assert got_request_exception_mock.send.call_args[0][0] == DummyViewSet
        assert isinstance(
            got_request_exception_mock.send.call_args[1]['request'], Request)
class TestExceptionHandler(TestCase):
    """Exercise the configured DRF exception handler directly."""

    def _invoke_handler(self, exc_instance):
        """Raise *exc_instance*, catch it, and feed it to the configured
        DRF exception handler; return whatever the handler returns."""
        handler = api_settings.EXCEPTION_HANDLER
        try:
            raise exc_instance
        except Exception as caught:
            return handler(caught, {})

    def _assert_handled(self, exc_instance, expected_status):
        """With propagation disabled, the handler must return a Response
        carrying *expected_status*."""
        with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False):
            response = self._invoke_handler(exc_instance)
        assert isinstance(response, Response)
        assert response.status_code == expected_status

    def test_api_exception_handler_returns_response(self):
        self._assert_handled(APIException(), 500)

    def test_exception_handler_returns_response_for_404(self):
        self._assert_handled(Http404(), 404)

    def test_exception_handler_returns_response_for_403(self):
        self._assert_handled(PermissionDenied(), 403)

    def test_non_api_exception_handler_returns_response(self):
        # Regular DRF exception handler does not return a Response for non-api
        # exceptions, but we do.
        self._assert_handled(Exception(), 500)

    def test_api_exception_handler_with_propagation(self):
        with self.assertRaises(APIException):
            with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=True):
                self._invoke_handler(APIException())

    def test_exception_handler_404_with_propagation(self):
        with self.assertRaises(Http404):
            with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=True):
                self._invoke_handler(Http404())

    def test_exception_handler_403_with_propagation(self):
        with self.assertRaises(PermissionDenied):
            with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=True):
                self._invoke_handler(PermissionDenied())

    def test_non_api_exception_handler_with_propagation(self):
        # Regular DRF exception handler does not return a Response for non-api
        # exceptions, but we do.
        with self.assertRaises(KeyError):
            with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=True):
                self._invoke_handler(KeyError())
|
{
"content_hash": "92b968c7736756c9851a1c6c4817b5cb",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 78,
"avg_line_length": 41.45945945945946,
"alnum_prop": 0.6465123859191656,
"repo_name": "wagnerand/olympia",
"id": "5da7ea3e1e64a2797b13f3b61c6cd6743dde3a35",
"size": "6136",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/olympia/api/tests/test_exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3996776"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
from pytest import fixture
from mock import Mock, call
from mailthon.middleware import TLS, Auth
from .utils import tls_started
@fixture
def smtp():
    """Return a Mock standing in for an SMTP connection object."""
    return Mock()
class TestTlsSupported:
    """TLS middleware behaviour when the server advertises STARTTLS."""

    @fixture
    def conn(self, smtp):
        # Pretend the SMTP server supports the STARTTLS extension.
        smtp.has_extn.return_value = True
        return smtp

    def test_no_force(self, conn):
        TLS()(conn)
        # Capability is probed first, then TLS is negotiated.
        assert conn.mock_calls[0] == call.has_extn('STARTTLS')
        assert tls_started(conn)

    def test_force(self, conn):
        TLS(force=True)(conn)
        assert tls_started(conn)
class TestTLSUnsupported:
    """TLS middleware behaviour when STARTTLS is not advertised."""

    @fixture
    def conn(self, smtp):
        # The server reports no STARTTLS support.
        smtp.has_extn.return_value = False
        return smtp

    def test_no_force(self, conn):
        TLS()(conn)
        assert not tls_started(conn)
class TestAuth:
    """Auth middleware should log in with the supplied credentials."""

    def test_logs_in_user(self, smtp):
        Auth(username='user', password='pass')(smtp)
        assert call.login('user', 'pass') in smtp.mock_calls
|
{
"content_hash": "ba54ba3770e93f3907353a264d317779",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 62,
"avg_line_length": 20.32,
"alnum_prop": 0.610236220472441,
"repo_name": "ashgan-dev/mailthon",
"id": "59913af527f5f34d81de97fa94488563ea0da346",
"size": "1016",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40004"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from RouteFinder import views
urlpatterns = patterns('',
    # User request submission views (CRUD on transport requests).
    url(r'^$', views.RequestList.as_view(), name='request_list'),
    # NOTE(review): the patterns below lack a leading '^' anchor, so the
    # regexes are not anchored to the start of the remaining path -- confirm
    # this matches only the intended URLs.
    url(r'add/$', views.RequestCreate.as_view(), name='request_add'),
    url(r'(?P<pk>\d+)/$', views.RequestUpdate.as_view(), name='request_update'),
    url(r'(?P<pk>\d+)/delete/$', views.RequestDelete.as_view(), name='request_delete'),
    # Planning views.
    url(r'PrepareSolution/$', views.PrepareSolutionView.as_view(), name='preparation'),
    url(r'ExamineSolution/$', views.ExamineSolutionView.as_view(), name='solution'),
    # Test data.
    url(r'Populatewithtest/$', views.PopulateTest, name='populate')
)
|
{
"content_hash": "454e66600efc8b1bdf27bdcd00864b7b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 87,
"avg_line_length": 38.05263157894737,
"alnum_prop": 0.6791147994467497,
"repo_name": "MatthewGWilliams/Staff-Transport",
"id": "7b0d5c129764d04dd334f3ade78aa86e1e1874ad",
"size": "723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emergencyTransport/RouteFinder/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "190554"
},
{
"name": "CSS",
"bytes": "1163"
},
{
"name": "JavaScript",
"bytes": "7015"
},
{
"name": "Python",
"bytes": "57863"
}
],
"symlink_target": ""
}
|
"""WSGI support for the Tornado web framework.
WSGI is the Python standard for web servers, and allows for interoperability
between Tornado and other Python web frameworks and servers. This module
provides WSGI support in two ways:
* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
interface. This is useful for running a Tornado app on another
HTTP server, such as Google App Engine. See the `WSGIAdapter` class
documentation for limitations that apply.
* `WSGIContainer` lets you run other WSGI applications and frameworks on the
Tornado HTTP server. For example, with this class you can mix Django
and Tornado handlers in a single server.
"""
from __future__ import absolute_import, division, print_function
import sys
from io import BytesIO
import tornado
from tornado.concurrent import Future
from tornado import escape
from tornado import httputil
from tornado.log import access_log
from tornado import web
from tornado.escape import native_str
from tornado.util import unicode_type, PY3
if PY3:
import urllib.parse as urllib_parse # py3
else:
import urllib as urllib_parse
# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
# that are smuggled inside objects of type unicode (via the latin1 encoding).
# These functions are like those in the tornado.escape module, but defined
# here to minimize the temptation to use them in non-wsgi contexts.
if str is unicode_type:
    def to_wsgi_str(s):
        # py3: WSGI strings are unicode smuggling bytes via latin1 (PEP 3333).
        assert isinstance(s, bytes)
        return s.decode('latin1')
    def from_wsgi_str(s):
        # py3: recover the real bytes by latin1-encoding the smuggled str.
        assert isinstance(s, str)
        return s.encode('latin1')
else:
    def to_wsgi_str(s):
        # py2: str is already a byte string, pass through unchanged.
        assert isinstance(s, bytes)
        return s
    def from_wsgi_str(s):
        # py2: str is already a byte string, pass through unchanged.
        assert isinstance(s, str)
        return s
class WSGIApplication(web.Application):
    """A WSGI equivalent of `tornado.web.Application`.

    .. deprecated:: 4.0

       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
    """
    def __call__(self, environ, start_response):
        # Delegate every request to a freshly wrapped adapter.
        adapter = WSGIAdapter(self)
        return adapter(environ, start_response)
# WSGI has no facilities for flow control, so just return an already-done
# Future when the interface requires it.  This single shared instance is
# handed out by _WSGIConnection and must never be mutated by callers.
_dummy_future = Future()
_dummy_future.set_result(None)
class _WSGIConnection(httputil.HTTPConnection):
    """Adapts tornado's HTTPConnection interface onto a WSGI start_response.

    Response bytes are buffered in ``_write_buffer`` and handed back to the
    WSGI server by ``WSGIAdapter.__call__`` once the request has finished.
    """
    def __init__(self, method, start_response, context):
        self.method = method
        self.start_response = start_response
        self.context = context
        self._write_buffer = []  # accumulated response body chunks
        self._finished = False  # set by finish()
        self._expected_content_remaining = None  # bytes still owed per Content-Length
        self._error = None  # stashed so the adapter can re-raise after the app returns
    def set_close_callback(self, callback):
        # WSGI has no facility for detecting a closed connection mid-request,
        # so we can simply ignore the callback.
        pass
    def write_headers(self, start_line, headers, chunk=None, callback=None):
        """Send the status line and headers through start_response.

        Records Content-Length (0 for HEAD) so later writes can be checked
        against the declared body size.
        """
        if self.method == 'HEAD':
            self._expected_content_remaining = 0
        elif 'Content-Length' in headers:
            self._expected_content_remaining = int(headers['Content-Length'])
        else:
            self._expected_content_remaining = None
        self.start_response(
            '%s %s' % (start_line.code, start_line.reason),
            [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
        if chunk is not None:
            self.write(chunk, callback)
        elif callback is not None:
            callback()
        return _dummy_future
    def write(self, chunk, callback=None):
        """Buffer a body chunk, raising if it exceeds the declared length."""
        if self._expected_content_remaining is not None:
            self._expected_content_remaining -= len(chunk)
            if self._expected_content_remaining < 0:
                self._error = httputil.HTTPOutputError(
                    "Tried to write more data than Content-Length")
                raise self._error
        self._write_buffer.append(chunk)
        if callback is not None:
            callback()
        return _dummy_future
    def finish(self):
        """Mark the response complete, raising if too few bytes were written."""
        if (self._expected_content_remaining is not None and
                self._expected_content_remaining != 0):
            self._error = httputil.HTTPOutputError(
                "Tried to write %d bytes less than Content-Length" %
                self._expected_content_remaining)
            raise self._error
        self._finished = True
class _WSGIRequestContext(object):
def __init__(self, remote_ip, protocol):
self.remote_ip = remote_ip
self.protocol = protocol
def __str__(self):
return self.remote_ip
class WSGIAdapter(object):
    """Converts a `tornado.web.Application` instance into a WSGI application.

    Example usage::

        import tornado.web
        import tornado.wsgi
        import wsgiref.simple_server

        class MainHandler(tornado.web.RequestHandler):
            def get(self):
                self.write("Hello, world")

        if __name__ == "__main__":
            application = tornado.web.Application([
                (r"/", MainHandler),
            ])
            wsgi_app = tornado.wsgi.WSGIAdapter(application)
            server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
            server.serve_forever()

    See the `appengine demo
    <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
    for an example of using this module to run a Tornado app on Google
    App Engine.

    In WSGI mode asynchronous methods are not supported.  This means
    that it is not possible to use `.AsyncHTTPClient`, or the
    `tornado.auth` or `tornado.websocket` modules.

    .. versionadded:: 4.0
    """
    def __init__(self, application):
        if isinstance(application, WSGIApplication):
            # Unwrap the deprecated WSGIApplication so requests go straight
            # to web.Application.__call__ instead of recursing through WSGI.
            self.application = lambda request: web.Application.__call__(
                application, request)
        else:
            self.application = application
    def __call__(self, environ, start_response):
        # WSGI entry point: build an HTTPServerRequest out of *environ*,
        # hand it to the Tornado application, and return the buffered body.
        method = environ["REQUEST_METHOD"]
        # Reassemble the URI from the (possibly latin1-smuggled) WSGI parts.
        uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
        uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
        if environ.get("QUERY_STRING"):
            uri += "?" + environ["QUERY_STRING"]
        headers = httputil.HTTPHeaders()
        if environ.get("CONTENT_TYPE"):
            headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            headers["Content-Length"] = environ["CONTENT_LENGTH"]
        for key in environ:
            if key.startswith("HTTP_"):
                # Map e.g. HTTP_X_FORWARDED_FOR back to X-Forwarded-For.
                headers[key[5:].replace("_", "-")] = environ[key]
        if headers.get("Content-Length"):
            body = environ["wsgi.input"].read(
                int(headers["Content-Length"]))
        else:
            body = b""
        protocol = environ["wsgi.url_scheme"]
        remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            host = environ["HTTP_HOST"]
        else:
            host = environ["SERVER_NAME"]
        connection = _WSGIConnection(method, start_response,
                                     _WSGIRequestContext(remote_ip, protocol))
        request = httputil.HTTPServerRequest(
            method, uri, "HTTP/1.1", headers=headers, body=body,
            host=host, connection=connection)
        request._parse_body()
        # The application must run to completion synchronously; any error
        # recorded on the connection is re-raised afterwards.
        self.application(request)
        if connection._error:
            raise connection._error
        if not connection._finished:
            raise Exception("request did not finish synchronously")
        return connection._write_buffer
class WSGIContainer(object):
    r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.

    .. warning::

       WSGI is a *synchronous* interface, while Tornado's concurrency model
       is based on single-threaded asynchronous execution.  This means that
       running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
       than running the same app in a multi-threaded WSGI server like
       ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
       benefits to combining Tornado and WSGI in the same process that
       outweigh the reduced scalability.

    Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
    run it. For example::

        def simple_app(environ, start_response):
            status = "200 OK"
            response_headers = [("Content-type", "text/plain")]
            start_response(status, response_headers)
            return ["Hello world!\n"]

        container = tornado.wsgi.WSGIContainer(simple_app)
        http_server = tornado.httpserver.HTTPServer(container)
        http_server.listen(8888)
        tornado.ioloop.IOLoop.current().start()

    This class is intended to let other frameworks (Django, web.py, etc)
    run on the Tornado HTTP server and I/O loop.

    The `tornado.web.FallbackHandler` class is often useful for mixing
    Tornado and WSGI apps in the same server.  See
    https://github.com/bdarnell/django-tornado-demo for a complete example.
    """
    def __init__(self, wsgi_application):
        self.wsgi_application = wsgi_application
    def __call__(self, request):
        # Handle one Tornado request by invoking the wrapped WSGI app.
        data = {}  # filled in by start_response (status + headers)
        response = []  # body chunks collected from the app
        def start_response(status, response_headers, exc_info=None):
            data["status"] = status
            data["headers"] = response_headers
            # PEP 3333: start_response returns a write() callable.
            return response.append
        app_response = self.wsgi_application(
            WSGIContainer.environ(request), start_response)
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            # PEP 3333: close() the app's iterable if it provides one.
            if hasattr(app_response, "close"):
                app_response.close()
        if not data:
            raise Exception("WSGI app did not call start_response")
        status_code, reason = data["status"].split(' ', 1)
        status_code = int(status_code)
        headers = data["headers"]
        header_set = set(k.lower() for (k, v) in headers)
        body = escape.utf8(body)
        if status_code != 304:
            # Fill in headers the app omitted (304 responses carry no body).
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))
        if "server" not in header_set:
            headers.append(("Server", "TornadoServer/%s" % tornado.version))
        start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
        header_obj = httputil.HTTPHeaders()
        for key, value in headers:
            header_obj.add(key, value)
        request.connection.write_headers(start_line, header_obj, chunk=body)
        request.connection.finish()
        self._log(status_code, request)
    @staticmethod
    def environ(request):
        """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
        """
        # Split "host:port"; fall back to the scheme's default port.
        hostport = request.host.split(":")
        if len(hostport) == 2:
            host = hostport[0]
            port = int(hostport[1])
        else:
            host = request.host
            port = 443 if request.protocol == "https" else 80
        environ = {
            "REQUEST_METHOD": request.method,
            "SCRIPT_NAME": "",
            "PATH_INFO": to_wsgi_str(escape.url_unescape(
                request.path, encoding=None, plus=False)),
            "QUERY_STRING": request.query,
            "REMOTE_ADDR": request.remote_ip,
            "SERVER_NAME": host,
            "SERVER_PORT": str(port),
            "SERVER_PROTOCOL": request.version,
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": request.protocol,
            "wsgi.input": BytesIO(escape.utf8(request.body)),
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": False,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
        }
        # Content-Type/Length get dedicated CGI keys; they are popped from
        # the headers so they are not duplicated as HTTP_* below.
        if "Content-Type" in request.headers:
            environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
        if "Content-Length" in request.headers:
            environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
        for key, value in request.headers.items():
            environ["HTTP_" + key.replace("-", "_").upper()] = value
        return environ
    def _log(self, status_code, request):
        # One access-log line; severity scales with the status code class.
        if status_code < 400:
            log_method = access_log.info
        elif status_code < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000.0 * request.request_time()
        summary = request.method + " " + request.uri + " (" + \
            request.remote_ip + ")"
        log_method("%d %s %.2fms", status_code, summary, request_time)
HTTPRequest = httputil.HTTPServerRequest
|
{
"content_hash": "8fc55e0aebcbf6df225a5718de94d8ce",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 81,
"avg_line_length": 37.4766081871345,
"alnum_prop": 0.616446906452368,
"repo_name": "Lancher/tornado",
"id": "22be7a897235080ac5805bb9cf638ab4a7d9af77",
"size": "13392",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tornado/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1664"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1610904"
},
{
"name": "Ruby",
"bytes": "1428"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
}
|
import os
# Project root: two levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any production use.
SECRET_KEY = 'pf-@jxtojga)z+4s*uwbgjrq$aep62-thd0q7f&o77xtpka!_m'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'guestbook'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

# When 'NODB' is enabled, we skip Database and Cache setup. This is useful
# to test the rest of the Django deployment while bootstrapping the application.
if os.getenv('NODB'):
    # Lightweight local SQLite fallback: lets the app boot with no
    # Postgres/Redis services available.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
else:
    # Dockerfile reads the DJANGO_PW from the secret into an environment
    # variable but its not there on kubectl exec. Soon Kubernetes versions
    # will have secrets as environment variables but currently just read it
    # from the volume
    DJANGO_PW = os.getenv('DJANGO_PASSWORD')
    if not DJANGO_PW:
        try:
            # NOTE(review): this file handle is never closed; a
            # with-statement would be safer.
            f = open('/etc/secrets/djangouserpw')
            DJANGO_PW = f.readline().rstrip()
        except IOError:
            pass
    if not DJANGO_PW:
        raise Exception("No DJANGO_PASSWORD provided.")
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'guestbook',
            'USER': 'django_user',
            'PASSWORD': DJANGO_PW,
            'HOST': os.getenv('POSTGRES_SERVICE_HOST', '127.0.0.1'),
            # NOTE(review): default is an int but os.getenv returns a str
            # when the variable is set -- confirm Django accepts both.
            'PORT': os.getenv('POSTGRES_SERVICE_PORT', 5432)
        }
    }
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': [
                '%s:%s' % (os.getenv('REDIS_MASTER_SERVICE_HOST', '127.0.0.1'),
                           os.getenv('REDIS_MASTER_SERVICE_PORT', 6379)),
            ],
            'OPTIONS': {
                'DB': 1,
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 50,
                    'timeout': 20,
                },
                'MAX_CONNECTIONS': 1000,
                'PICKLE_VERSION': -1,
            },
        },
    }

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# STATIC_URL = 'https://storage.googleapis.com/delete-me-1156/static/'
STATIC_ROOT = 'static/'
|
{
"content_hash": "257051bd77ae9901c354b643a8acd716",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 79,
"avg_line_length": 30.696296296296296,
"alnum_prop": 0.6124517374517374,
"repo_name": "waprin/kubernetes_django_postgres_redis",
"id": "b2ec69ffe6be4ca0b18a4a9705410d4b31ec95a0",
"size": "4792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guestbook/mysite/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6069"
},
{
"name": "JavaScript",
"bytes": "1518"
},
{
"name": "Makefile",
"bytes": "5657"
},
{
"name": "Python",
"bytes": "13148"
}
],
"symlink_target": ""
}
|
import os
import sys
import pkg_resources # part of setuptools
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
# The docs live in <root>/docs, the package in <root>/src.
project_root = os.path.join(os.path.dirname(cwd), u"src")

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.viewcode",
    "sphinx.ext.doctest",
    "sphinx.ext.autosectionlabel",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = u"cattrs"
# (The name 'copyright' shadows the builtin by Sphinx convention.)
copyright = u"2020, Tin Tvrtković"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
# NOTE(review): pkg_resources is deprecated in modern setuptools;
# importlib.metadata.version(project) is the current equivalent.
version = pkg_resources.require(project)[0].version
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
# keep_warnings = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as
# html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ["_static"]

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names
# to template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = "cattrsdoc"

# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    (
        "index",
        "cattrs.tex",
        u"cattrs Documentation",
        u"Tin Tvrtković",
        "manual",
    )
]

# The name of an image file (relative to this directory) to place at
# the top of the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ("index", "cattrs", u"cattrs Documentation", [u"Tin Tvrtković"], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "cattrs",
        u"cattrs Documentation",
        u"Tin Tvrtković",
        "cattrs",
        "Composable complex class support for attrs.",
        "Miscellaneous",
    )
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Names made available to every doctest block in the docs.
doctest_global_setup = (
    "import attr, cattr;"
    "from attr import define;"
    "from typing import *;"
    "from enum import Enum, unique"
)
|
{
"content_hash": "c7246c1e0b230bd95994e0885347add3",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 76,
"avg_line_length": 30.206521739130434,
"alnum_prop": 0.6910159529806885,
"repo_name": "Tinche/cattrs",
"id": "44b6cf68477732fcc786d62308743be444ef7094",
"size": "8782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2290"
},
{
"name": "Python",
"bytes": "80520"
}
],
"symlink_target": ""
}
|
import unittest
from twitter.common.app.modules.varz import VarsEndpoint, VarsSubsystem
from twitter.common.http.server import request
from twitter.common.quantity import Amount, Time
from twitter.common.metrics import NamedGauge, RootMetrics
import json
import pytest
class TestVarz(unittest.TestCase):
    """Tests for VarsEndpoint: a configured stats-filter regex combined with
    the ?filtered=1 request flag should hide matching variables."""

    def _register_gauges(self):
        """Register one gauge the test filter matches ('alpha') and one it
        does not ('zone') on the process-wide RootMetrics singleton."""
        # The original tests bound the 'alpha' gauge to a variable named
        # `zone` and vice versa; the registered gauges are unchanged, the
        # local names are simply no longer swapped.
        rm = RootMetrics()
        alpha = NamedGauge('alpha', "wont_be_visible")
        zone = NamedGauge('zone', "smf1")
        rm.register(alpha)
        rm.register(zone)

    def test_breaking_out_regex(self):
        """compile_stats_filters builds a single start-anchored regex from
        the given pattern list."""
        vars_subsystem = VarsSubsystem()
        regex = vars_subsystem.compile_stats_filters(["alpha", "beta.*"])
        assert regex.match("alpha")
        assert not regex.match("something_alpha_something")
        assert regex.match("beta")
        assert regex.match("beta_suffix")
        assert not regex.match("abeta")

    def test_filtering_vars_filter_enabled_and_requested(self):
        """Matching vars are omitted when a filter is set and ?filtered=1."""
        self._register_gauges()
        vars_subsystem = VarsSubsystem()
        regex = vars_subsystem.compile_stats_filters(["alpha", "beta.*"])
        endpoint = VarsEndpoint(period=Amount(60000, Time.MILLISECONDS),
                                stats_filter=regex)
        request.GET.append('filtered', '1')
        try:
            metrics_returned = endpoint.handle_vars_json()
            assert "zone" in metrics_returned
            assert "alpha" not in metrics_returned
        finally:
            # Always reset the shared request state for subsequent tests.
            request.GET.replace('filtered', None)

    def test_filtering_vars_filter_enabled_and_not_requested(self):
        """Without ?filtered=1 the filter regex is ignored."""
        self._register_gauges()
        vars_subsystem = VarsSubsystem()
        regex = vars_subsystem.compile_stats_filters(["alpha", "beta.*"])
        endpoint = VarsEndpoint(period=Amount(60000, Time.MILLISECONDS),
                                stats_filter=regex)
        metrics_returned = endpoint.handle_vars_json()
        assert "zone" in metrics_returned
        assert "alpha" in metrics_returned
        # Defensive reset (kept from the original) in case an earlier test
        # left the flag set on the shared request object.
        request.GET.replace('filtered', None)

    def test_filtering_vars_filter_disabled_and_requested(self):
        """?filtered=1 with no filter configured hides nothing."""
        self._register_gauges()
        endpoint = VarsEndpoint(period=Amount(60000, Time.MILLISECONDS),
                                stats_filter=None)
        request.GET.append('filtered', '1')
        try:
            metrics_returned = endpoint.handle_vars_json()
            assert "zone" in metrics_returned
            assert "alpha" in metrics_returned
        finally:
            request.GET.replace('filtered', None)
|
{
"content_hash": "9f6b2153e671bd2e078dc6fba9ef41db",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 88,
"avg_line_length": 36.57971014492754,
"alnum_prop": 0.7087955625990491,
"repo_name": "WCCCEDU/twitter-commons",
"id": "271000931e2fd35e4a5b87d81cf15b7792b8515c",
"size": "3425",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "tests/python/twitter/common/app/test_varz.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "26960"
},
{
"name": "HTML",
"bytes": "14899"
},
{
"name": "Java",
"bytes": "2607127"
},
{
"name": "JavaScript",
"bytes": "29955"
},
{
"name": "Python",
"bytes": "1202158"
},
{
"name": "Scala",
"bytes": "8271"
},
{
"name": "Shell",
"bytes": "27935"
},
{
"name": "Smalltalk",
"bytes": "79"
},
{
"name": "Thrift",
"bytes": "51878"
}
],
"symlink_target": ""
}
|
import angr
from angr.sim_type import SimTypeTop, SimTypeLength, SimTypeInt
import logging
# Module-level logger used for the debug tracing inside the procedure.
l = logging.getLogger("angr.procedures.libc.memcmp")
class memcmp(angr.SimProcedure):
    """Symbolic summary of libc memcmp(s1, s2, n).

    The comparison is split into two parts:
      * a "definite" prefix of length min(n) -- bytes compared for every
        feasible value of n under the current constraints;
      * a "conditional" tail (only when n is symbolic) whose contribution
        depends on the concrete value n takes, bounded by the libc
        max_buffer_size option.

    The return value is a bitvector that is 0 / -1 / 1 for
    equal / s1 < s2 / s1 > s2 (unsigned comparison of the loaded words).
    """
    #pylint:disable=arguments-differ

    def run(self, s1_addr, s2_addr, n):
        # TODO: look into smarter types here
        self.argument_types = {0: self.ty_ptr(SimTypeTop()),
                               1: self.ty_ptr(SimTypeTop()),
                               2: SimTypeLength(self.state.arch)}
        self.return_type = SimTypeInt(32, True)

        max_memcmp_size = self.state.libc.max_buffer_size

        # Smallest length n can take: those bytes are always compared.
        definite_size = self.state.se.min_int(n)
        conditional_s1_start = s1_addr + definite_size
        conditional_s2_start = s2_addr + definite_size
        if self.state.se.symbolic(n):
            conditional_size = int(max(max_memcmp_size - definite_size, 0))
        else:
            conditional_size = 0
        l.debug("Definite size %s and conditional size: %s", definite_size, conditional_size)

        if definite_size > 0:
            s1_part = self.state.memory.load(s1_addr, definite_size, endness='Iend_BE')
            s2_part = self.state.memory.load(s2_addr, definite_size, endness='Iend_BE')
            cases = [ [s1_part == s2_part, self.state.se.BVV(0, self.state.arch.bits)], [self.state.se.ULT(s1_part, s2_part), self.state.se.BVV(-1, self.state.arch.bits)], [self.state.se.UGT(s1_part, s2_part), self.state.se.BVV(1, self.state.arch.bits) ] ]
            definite_answer = self.state.se.ite_cases(cases, 2)
            constraint = self.state.se.Or(*[c for c,_ in cases])
            self.state.add_constraints(constraint)

            l.debug("Created definite answer: %s", definite_answer)
            l.debug("Created constraint: %s", constraint)
            # Fixed typo in the debug message: "crom" -> "from".
            l.debug("... from cases: %s", cases)
        else:
            definite_answer = self.state.se.BVV(0, self.state.arch.bits)

        # A concretely non-zero prefix comparison decides the result
        # outright; the exact value of n cannot change it.
        if not self.state.se.symbolic(definite_answer) and self.state.se.eval(definite_answer) != 0:
            return definite_answer

        if conditional_size > 0:
            s1_all = self.state.memory.load(conditional_s1_start, conditional_size, endness='Iend_BE')
            s2_all = self.state.memory.load(conditional_s2_start, conditional_size, endness='Iend_BE')
            conditional_rets = { 0: definite_answer }

            # For each possible tail length, compare the corresponding
            # leading slice of the tail (byte+1 bytes on iteration `byte`).
            for byte, bit in zip(range(conditional_size), range(conditional_size*8, 0, -8)):
                s1_part = s1_all[conditional_size*8-1 : bit-8]
                s2_part = s2_all[conditional_size*8-1 : bit-8]
                cases = [ [s1_part == s2_part, self.state.se.BVV(0, self.state.arch.bits)], [self.state.se.ULT(s1_part, s2_part), self.state.se.BVV(-1, self.state.arch.bits)], [self.state.se.UGT(s1_part, s2_part), self.state.se.BVV(1, self.state.arch.bits) ] ]
                conditional_rets[byte+1] = self.state.se.ite_cases(cases, 0)
                self.state.add_constraints(self.state.se.Or(*[c for c,_ in cases]))

            # Select the tail result by how many extra bytes n covers, and
            # fold in the prefix answer.
            ret_expr = self.state.solver.If(definite_answer == 0, self.state.se.ite_dict(n - definite_size, conditional_rets, 2), definite_answer)
            self.state.add_constraints(self.state.se.Or(*[n-definite_size == c for c in conditional_rets.keys()]))
            return ret_expr
        else:
            return definite_answer
|
{
"content_hash": "d5fd51138d11ec376d0a37013172d2b3",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 260,
"avg_line_length": 53.75806451612903,
"alnum_prop": 0.6150615061506151,
"repo_name": "axt/angr",
"id": "7f9f61ba9b5b0b50b060aab31889b997c279fb80",
"size": "3333",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "angr/procedures/libc/memcmp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "38446"
},
{
"name": "Makefile",
"bytes": "617"
},
{
"name": "Python",
"bytes": "2753899"
}
],
"symlink_target": ""
}
|
"""
Python interface to KrisLibrary nonlinear, multidimensional root finding routines
"""
from sys import version_info
if version_info >= (2,6,0):
    # SWIG-generated loader: look for the compiled _rootfind extension next
    # to this wrapper first, falling back to a plain import.
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_rootfind', [dirname(__file__)])
        except ImportError:
            import _rootfind
            return _rootfind
        if fp is not None:
            try:
                _mod = imp.load_module('_rootfind', fp, pathname, description)
            finally:
                # Always close the file handle opened by find_module.
                fp.close()
            return _mod
    _rootfind = swig_import_helper()
    del swig_import_helper
else:
    import _rootfind
del version_info
# Keep a reference to the builtin property (SWIG boilerplate).
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Non-static attribute write: unknown names become plain instance
    attributes instead of raising AttributeError."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# New-style class base for the generated proxies; falls back to a classic
# class on interpreters without the 'object' builtin.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def setFTolerance(*args):
    """
    setFTolerance(double tolf)

    void setFTolerance(double tolf)

    Sets the termination threshold for the change in f.
    """
    return _rootfind.setFTolerance(*args)
def setXTolerance(*args):
    """
    setXTolerance(double tolx)

    void setXTolerance(double tolx)

    Sets the termination threshold for the change in x.
    """
    return _rootfind.setXTolerance(*args)
def setVectorField(*args):
    """
    setVectorField(PyObject * pVFObj) -> int

    int setVectorField(PyObject *pVFObj)

    Sets the vector field object, returns 0 if pVFObj = NULL, 1 otherwise.
    """
    return _rootfind.setVectorField(*args)
def findRoots(*args):
    """
    findRoots(PyObject * startVals, int iter) -> PyObject *

    PyObject* findRoots(PyObject *startVals, int iter)

    Performs unconstrained root finding for up to iter iterations.  The
    return value is a tuple (code, x, n) where x is the final point, n is
    the number of iterations used, and code indicates:
      0 : convergence reached in x
      1 : convergence reached in f
      2 : divergence
      3 : degeneration of gradient (local extremum or saddle point)
      4 : maximum iterations reached
      5 : numerical error occurred
    """
    return _rootfind.findRoots(*args)
def findRootsBounded(*args):
    """
    findRootsBounded(PyObject * startVals, PyObject * boundVals, int iter) -> PyObject *

    PyObject* findRootsBounded(PyObject *startVals, PyObject *boundVals, int iter)

    Same as findRoots, but with given bounds (xmin,xmax)
    """
    return _rootfind.findRootsBounded(*args)
def destroy():
    """
    destroy()

    void destroy()

    Destroys the module's internal data structures.
    """
    return _rootfind.destroy()
# This file is compatible with both classic and new-style classes.
|
{
"content_hash": "63b66d0f89c337fc9ee11e60aa74b562",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 90,
"avg_line_length": 26.193103448275863,
"alnum_prop": 0.641390205371248,
"repo_name": "stevekuznetsov/Klampt",
"id": "35750ef57d29810b600885e7b3095f05bc153ab4",
"size": "4005",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/klampt/rootfind.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4484"
},
{
"name": "C++",
"bytes": "4381537"
},
{
"name": "CMake",
"bytes": "56295"
},
{
"name": "GLSL",
"bytes": "28"
},
{
"name": "Makefile",
"bytes": "5311"
},
{
"name": "Python",
"bytes": "931058"
},
{
"name": "QMake",
"bytes": "3587"
},
{
"name": "Shell",
"bytes": "283"
}
],
"symlink_target": ""
}
|
import sys
import typing
import coroutine
# NOTE(review): the decorator attribute is spelled 'corouine' -- this looks
# like a typo for 'coroutine' (a primer that advances the generator past its
# first yield); confirm the attribute name against the local coroutine module.
@coroutine.corouine
def grep(pattern: str) -> typing.Generator[str, str, None]:
    """Coroutine that prints every line sent into it containing *pattern*.

    Terminates (printing a goodbye message) when closed via GeneratorExit.
    """
    print(f"looking for {pattern}")
    try:
        while True:
            line = (yield)
            if pattern in line:
                print(line,)
    except GeneratorExit:
        print("Going away. Goodbye ...")
if __name__ == '__main__':
    # Grep for "python" in the file named on the command line, defaulting to
    # this example's own source file.
    fname = sys.argv[1] if len(sys.argv) > 1 else "grepclose.py"
    with open(fname) as fp:
        g = grep("python")
        for line in fp.readlines():
            g.send(line)
        # NOTE(review): g.close() is never called explicitly; the coroutine's
        # GeneratorExit handler runs when the generator is finalized.
|
{
"content_hash": "ae2fb5d2fe6448c66ede849927691a07",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 64,
"avg_line_length": 25.09090909090909,
"alnum_prop": 0.5634057971014492,
"repo_name": "ASMlover/study",
"id": "50002ad9432a911125ccf7402b247a0eaa5470f6",
"size": "1946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/coroutines/grepclose.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "3055440"
},
{
"name": "Batchfile",
"bytes": "4662"
},
{
"name": "Brainfuck",
"bytes": "571"
},
{
"name": "C",
"bytes": "13569580"
},
{
"name": "C#",
"bytes": "3959"
},
{
"name": "C++",
"bytes": "14741264"
},
{
"name": "CMake",
"bytes": "543917"
},
{
"name": "CSS",
"bytes": "11505"
},
{
"name": "Common Lisp",
"bytes": "114"
},
{
"name": "Emacs Lisp",
"bytes": "6042"
},
{
"name": "Go",
"bytes": "105203"
},
{
"name": "Groovy",
"bytes": "2907"
},
{
"name": "HTML",
"bytes": "911945"
},
{
"name": "Lex",
"bytes": "9370"
},
{
"name": "Lua",
"bytes": "32829"
},
{
"name": "Makefile",
"bytes": "1000611"
},
{
"name": "NASL",
"bytes": "3609"
},
{
"name": "NewLisp",
"bytes": "5805"
},
{
"name": "Perl",
"bytes": "594"
},
{
"name": "Python",
"bytes": "2752752"
},
{
"name": "SWIG",
"bytes": "91"
},
{
"name": "Shell",
"bytes": "9993"
},
{
"name": "Vim script",
"bytes": "92204"
},
{
"name": "Yacc",
"bytes": "6278"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Release/support metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_user
short_description: Management of users in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.4'
author: Daniel Korn (@dkorn)
description:
- The manageiq_user module supports adding, updating and deleting users in ManageIQ.
options:
state:
description:
- absent - user should not exist, present - user should be.
required: False
choices: ['absent', 'present']
default: 'present'
userid:
description:
- The unique userid in manageiq, often mentioned as username.
required: true
name:
description:
- The users' full name.
required: false
default: null
password:
description:
- The users' password.
required: false
default: null
group:
description:
- The name of the group to which the user belongs.
required: false
default: null
email:
description:
- The users' E-mail address.
required: false
default: null
'''
EXAMPLES = '''
- name: Create a new user in ManageIQ
manageiq_user:
userid: 'jdoe'
name: 'Jane Doe'
password: 'VerySecret'
group: 'EvmGroup-user'
email: 'jdoe@example.com'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Create a new user in ManageIQ using a token
manageiq_user:
userid: 'jdoe'
name: 'Jane Doe'
password: 'VerySecret'
group: 'EvmGroup-user'
email: 'jdoe@example.com'
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
verify_ssl: False
- name: Delete a user in ManageIQ
manageiq_user:
state: 'absent'
userid: 'jdoe'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Delete a user in ManageIQ using a token
manageiq_user:
state: 'absent'
userid: 'jdoe'
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
verify_ssl: False
- name: Update email of user in ManageIQ
manageiq_user:
userid: 'jdoe'
email: 'jaustine@example.com'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Update email of user in ManageIQ using a token
manageiq_user:
userid: 'jdoe'
email: 'jaustine@example.com'
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
verify_ssl: False
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQUser(object):
    """
    Object to execute user management operations in manageiq.

    Thin wrapper around a connected ManageIQ helper: each method issues one
    REST call via self.client and converts the outcome into the result dict
    Ansible expects, or fails the module on error.
    """

    def __init__(self, manageiq):
        # Keep shortcuts to the helper's AnsibleModule, base REST url and
        # HTTP client.
        self.manageiq = manageiq

        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client

    def group_id(self, description):
        """ Search for group id by group description.

        Returns:
            the group id, or send a module Fail signal if group not found.
        """
        group = self.manageiq.find_collection_resource_by('groups', description=description)
        if not group:  # group doesn't exist
            self.module.fail_json(
                msg="group %s does not exist in manageiq" % (description))

        return group['id']

    def user(self, userid):
        """ Search for user object by userid.

        Returns:
            the user, or None if user not found.
        """
        return self.manageiq.find_collection_resource_by('users', userid=userid)

    def compare_user(self, user, name, group_id, password, email):
        """ Compare user fields with new field values.

        Returns:
            false if user fields have some difference from new fields, true o/w.
        """
        # NOTE(review): a non-None password always counts as a difference,
        # presumably because the API does not return the stored password for
        # comparison -- confirm.
        found_difference = (
            (name and user['name'] != name) or
            (password is not None) or
            (email and user['email'] != email) or
            (group_id and user['group']['id'] != group_id)
        )

        return not found_difference

    def delete_user(self, user):
        """ Deletes a user from manageiq.

        Returns:
            a short message describing the operation executed.
        """
        try:
            url = '%s/users/%s' % (self.api_url, user['id'])
            result = self.client.post(url, action='delete')
        except Exception as e:
            self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))

        return dict(changed=True, msg=result['message'])

    def edit_user(self, user, name, group, password, email):
        """ Edit a user from manageiq.

        Only arguments that are not None are sent to the API; None fields
        keep their current value.

        Returns:
            a short message describing the operation executed.
        """
        group_id = None
        url = '%s/users/%s' % (self.api_url, user['id'])

        resource = dict(userid=user['userid'])
        if group is not None:
            group_id = self.group_id(group)
            resource['group'] = dict(id=group_id)
        if name is not None:
            resource['name'] = name
        if password is not None:
            resource['password'] = password
        if email is not None:
            resource['email'] = email

        # check if we need to update ( compare_user is true if no difference found )
        if self.compare_user(user, name, group_id, password, email):
            return dict(
                changed=False,
                msg="user %s is not changed." % (user['userid']))

        # try to update user
        try:
            result = self.client.post(url, action='edit', resource=resource)
        except Exception as e:
            self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))

        return dict(
            changed=True,
            msg="successfully updated the user %s: %s" % (user['userid'], result))

    def create_user(self, userid, name, group, password, email):
        """ Creates the user in manageiq.

        Returns:
            the created user id, name, created_on timestamp,
            updated_on timestamp, userid and current_group_id.
        """
        # check for required arguments
        for key, value in dict(name=name, group=group, password=password).items():
            if value in (None, ''):
                self.module.fail_json(msg="missing required argument: %s" % (key))

        group_id = self.group_id(group)
        url = '%s/users' % (self.api_url)

        resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}}
        if email is not None:
            resource['email'] = email

        # try to create a new user
        try:
            result = self.client.post(url, action='create', resource=resource)
        except Exception as e:
            self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e)))

        return dict(
            changed=True,
            msg="successfully created the user %s: %s" % (userid, result['results']))
def main():
    """Module entry point: create, update or delete the ManageIQ user
    according to the 'state' parameter."""
    module = AnsibleModule(
        argument_spec=dict(
            manageiq_connection=dict(required=True, type='dict',
                                     options=manageiq_argument_spec()),
            userid=dict(required=True, type='str'),
            name=dict(),
            password=dict(no_log=True),
            group=dict(),
            email=dict(),
            state=dict(choices=['absent', 'present'], default='present')
        ),
    )

    params = module.params
    userid = params['userid']
    name = params['name']
    password = params['password']
    group = params['group']
    email = params['email']
    state = params['state']

    manageiq = ManageIQ(module)
    manageiq_user = ManageIQUser(manageiq)

    user = manageiq_user.user(userid)

    if state == "absent":
        # Delete when the user exists; otherwise there is nothing to do.
        if user:
            res_args = manageiq_user.delete_user(user)
        else:
            res_args = dict(
                changed=False,
                msg="user %s: does not exist in manageiq" % (userid))
    else:
        # state == "present" -- the only other value 'choices' allows.
        if user:
            res_args = manageiq_user.edit_user(user, name, group, password, email)
        else:
            res_args = manageiq_user.create_user(userid, name, group, password, email)

    module.exit_json(**res_args)


if __name__ == "__main__":
    main()
|
{
"content_hash": "ecf37491ce0d37aaa55d2c758c8cb0a0",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 100,
"avg_line_length": 29.473684210526315,
"alnum_prop": 0.5852678571428571,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "cb680c0176600d533e7f33f11b418d208c7e26aa",
"size": "9683",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/remote_management/manageiq/manageiq_user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
    """
    Auto-generated by the PythonClass plugin for PYANG from YANG module
    openconfig-network-instance, at the path /network-instances/network-instance/
    protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/
    opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding/state.

    Container holding the state parameters of the SID/Label binding sub-TLV
    of the extended prefix LSA; each member leaf is exposed as a read-only
    property of the matching YANG type.
    """

    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__mirroring",
        "__multi_topology_identifier",
        "__weight",
    )

    _yang_name = "state"
    _pybind_generated_by = "container"

    def _leaf_kwargs(self):
        # Keyword arguments shared by every leaf of this container.
        return dict(
            is_leaf=True,
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            is_config=False,
        )

    def _mirroring_kwargs(self):
        # Construction arguments for the 'mirroring' boolean leaf.
        kw = self._leaf_kwargs()
        kw.update(
            base=YANGBool,
            default=YANGBool("false"),
            yang_name="mirroring",
            yang_type="boolean",
        )
        return kw

    def _multi_topology_identifier_kwargs(self):
        # Construction arguments for the 'multi-topology-identifier' uint8 leaf.
        kw = self._leaf_kwargs()
        kw.update(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            yang_name="multi-topology-identifier",
            yang_type="uint8",
        )
        return kw

    def _weight_kwargs(self):
        # Construction arguments for the 'weight' uint8 leaf.
        kw = self._leaf_kwargs()
        kw.update(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            yang_name="weight",
            yang_type="uint8",
        )
        return kw

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        self.__mirroring = YANGDynClass(**self._mirroring_kwargs())
        self.__multi_topology_identifier = YANGDynClass(
            **self._multi_topology_identifier_kwargs()
        )
        self.__weight = YANGDynClass(**self._weight_kwargs())

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy over only the elements of the supplied object that changed.
            if not all(hasattr(args[0], e) for e in self._pyangbind_elements):
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container within the YANG tree; delegates upwards when
        # a parent node is attached.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        return [
            "network-instances",
            "network-instance",
            "protocols",
            "protocol",
            "ospfv2",
            "areas",
            "area",
            "lsdb",
            "lsa-types",
            "lsa-type",
            "lsas",
            "lsa",
            "opaque-lsa",
            "extended-prefix",
            "tlvs",
            "tlv",
            "sid-label-binding",
            "state",
        ]

    def _get_mirroring(self):
        """
        Getter method for mirroring, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/mirroring (boolean)

        YANG Description: When set to true, this indicates that the SID/Label Binding sub-TLV
        entries contained within this TLV are indicative of a mirroring
        context
        """
        return self.__mirroring

    def _set_mirroring(self, v, load=False):
        """
        Setter method for mirroring, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/mirroring (boolean)

        Since this leaf is read-only (config: false), _set_mirroring is
        considered a private method; backends looking to populate the value
        should call thisObj._set_mirroring() directly.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v, **self._mirroring_kwargs())
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """mirroring must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="mirroring", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
                }
            )
        self.__mirroring = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_mirroring(self):
        # Restore the leaf to its default-constructed value.
        self.__mirroring = YANGDynClass(**self._mirroring_kwargs())

    def _get_multi_topology_identifier(self):
        """
        Getter method for multi_topology_identifier, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/multi_topology_identifier (uint8)

        YANG Description: The identifier for the topology to which the SID/Label Binding
        sub-TLV is associated. The value of this leaf is a MT-ID as defined
        in RFC4915
        """
        return self.__multi_topology_identifier

    def _set_multi_topology_identifier(self, v, load=False):
        """
        Setter method for multi_topology_identifier, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/multi_topology_identifier (uint8)

        Since this leaf is read-only (config: false), _set_multi_topology_identifier
        is considered a private method; backends looking to populate the value
        should call thisObj._set_multi_topology_identifier() directly.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v, **self._multi_topology_identifier_kwargs())
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """multi_topology_identifier must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="multi-topology-identifier", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
                }
            )
        self.__multi_topology_identifier = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_multi_topology_identifier(self):
        # Restore the leaf to its default-constructed value.
        self.__multi_topology_identifier = YANGDynClass(
            **self._multi_topology_identifier_kwargs()
        )

    def _get_weight(self):
        """
        Getter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/weight (uint8)

        YANG Description: The weight of the advertised binding when used for load-balancing
        purposes
        """
        return self.__weight

    def _set_weight(self, v, load=False):
        """
        Setter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/weight (uint8)

        Since this leaf is read-only (config: false), _set_weight is
        considered a private method; backends looking to populate the value
        should call thisObj._set_weight() directly.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v, **self._weight_kwargs())
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """weight must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
                }
            )
        self.__weight = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_weight(self):
        # Restore the leaf to its default-constructed value.
        self.__weight = YANGDynClass(**self._weight_kwargs())

    mirroring = __builtin__.property(_get_mirroring)
    multi_topology_identifier = __builtin__.property(_get_multi_topology_identifier)
    weight = __builtin__.property(_get_weight)

    _pyangbind_elements = OrderedDict(
        [
            ("mirroring", mirroring),
            ("multi_topology_identifier", multi_topology_identifier),
            ("weight", weight),
        ]
    )
class state(PybindBase):
    """
    Auto-generated by the PythonClass plugin for PYANG from YANG module
    openconfig-network-instance-l2, at the path /network-instances/network-instance/
    protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/
    opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding/state.

    Container holding the state parameters of the SID/Label binding sub-TLV
    of the extended prefix LSA; each member leaf is exposed as a read-only
    property of the matching YANG type.
    """

    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__mirroring",
        "__multi_topology_identifier",
        "__weight",
    )

    _yang_name = "state"
    _pybind_generated_by = "container"

    def _leaf_kwargs(self):
        # Keyword arguments shared by every leaf of this container.
        return dict(
            is_leaf=True,
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            is_config=False,
        )

    def _mirroring_kwargs(self):
        # Construction arguments for the 'mirroring' boolean leaf.
        kw = self._leaf_kwargs()
        kw.update(
            base=YANGBool,
            default=YANGBool("false"),
            yang_name="mirroring",
            yang_type="boolean",
        )
        return kw

    def _multi_topology_identifier_kwargs(self):
        # Construction arguments for the 'multi-topology-identifier' uint8 leaf.
        kw = self._leaf_kwargs()
        kw.update(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            yang_name="multi-topology-identifier",
            yang_type="uint8",
        )
        return kw

    def _weight_kwargs(self):
        # Construction arguments for the 'weight' uint8 leaf.
        kw = self._leaf_kwargs()
        kw.update(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            yang_name="weight",
            yang_type="uint8",
        )
        return kw

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        self.__mirroring = YANGDynClass(**self._mirroring_kwargs())
        self.__multi_topology_identifier = YANGDynClass(
            **self._multi_topology_identifier_kwargs()
        )
        self.__weight = YANGDynClass(**self._weight_kwargs())

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy over only the elements of the supplied object that changed.
            if not all(hasattr(args[0], e) for e in self._pyangbind_elements):
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container within the YANG tree; delegates upwards when
        # a parent node is attached.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        return [
            "network-instances",
            "network-instance",
            "protocols",
            "protocol",
            "ospfv2",
            "areas",
            "area",
            "lsdb",
            "lsa-types",
            "lsa-type",
            "lsas",
            "lsa",
            "opaque-lsa",
            "extended-prefix",
            "tlvs",
            "tlv",
            "sid-label-binding",
            "state",
        ]

    def _get_mirroring(self):
        """
        Getter method for mirroring, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/mirroring (boolean)

        YANG Description: When set to true, this indicates that the SID/Label Binding sub-TLV
        entries contained within this TLV are indicative of a mirroring
        context
        """
        return self.__mirroring

    def _set_mirroring(self, v, load=False):
        """
        Setter method for mirroring, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/mirroring (boolean)

        Since this leaf is read-only (config: false), _set_mirroring is
        considered a private method; backends looking to populate the value
        should call thisObj._set_mirroring() directly.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v, **self._mirroring_kwargs())
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """mirroring must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="mirroring", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
                }
            )
        self.__mirroring = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_mirroring(self):
        # Restore the leaf to its default-constructed value.
        self.__mirroring = YANGDynClass(**self._mirroring_kwargs())

    def _get_multi_topology_identifier(self):
        """
        Getter method for multi_topology_identifier, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/multi_topology_identifier (uint8)

        YANG Description: The identifier for the topology to which the SID/Label Binding
        sub-TLV is associated. The value of this leaf is a MT-ID as defined
        in RFC4915
        """
        return self.__multi_topology_identifier

    def _set_multi_topology_identifier(self, v, load=False):
        """
        Setter method for multi_topology_identifier, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/multi_topology_identifier (uint8)

        Since this leaf is read-only (config: false), _set_multi_topology_identifier
        is considered a private method; backends looking to populate the value
        should call thisObj._set_multi_topology_identifier() directly.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v, **self._multi_topology_identifier_kwargs())
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """multi_topology_identifier must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="multi-topology-identifier", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
                }
            )
        self.__multi_topology_identifier = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_multi_topology_identifier(self):
        # Restore the leaf to its default-constructed value.
        self.__multi_topology_identifier = YANGDynClass(
            **self._multi_topology_identifier_kwargs()
        )

    def _get_weight(self):
        """
        Getter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/weight (uint8)

        YANG Description: The weight of the advertised binding when used for load-balancing
        purposes
        """
        return self.__weight

    def _set_weight(self, v, load=False):
        """
        Setter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/weight (uint8)

        Since this leaf is read-only (config: false), _set_weight is
        considered a private method; backends looking to populate the value
        should call thisObj._set_weight() directly.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v, **self._weight_kwargs())
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """weight must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
                }
            )
        self.__weight = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_weight(self):
        # Restore the leaf to its default-constructed value.
        self.__weight = YANGDynClass(**self._weight_kwargs())

    mirroring = __builtin__.property(_get_mirroring)
    multi_topology_identifier = __builtin__.property(_get_multi_topology_identifier)
    weight = __builtin__.property(_get_weight)

    _pyangbind_elements = OrderedDict(
        [
            ("mirroring", mirroring),
            ("multi_topology_identifier", multi_topology_identifier),
            ("weight", weight),
        ]
    )
|
{
"content_hash": "e002d1fa76f56981569469a0ff546482",
"timestamp": "",
"source": "github",
"line_count": 701,
"max_line_length": 437,
"avg_line_length": 41.80171184022824,
"alnum_prop": 0.5869706173429342,
"repo_name": "napalm-automation/napalm-yang",
"id": "52255ad178458127ba1bc5e320248f7baed533bf",
"size": "29327",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
}
|
from urllib import urlencode
import oauth2 as oauth
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import RequestSite
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from maker.core.rendering import render_to_response
from maker.core.api.auth.forms import AuthorizeRequestTokenForm
from maker.core.api.auth.store import store, InvalidConsumerError, InvalidTokenError
from maker.core.api.auth.utils import verify_oauth_request, get_oauth_request, require_params
@csrf_exempt
def get_request_token(request):
    """OAuth 1.0a step 1: issue an unauthorized request token.

    Requires a signed request carrying ``oauth_callback``; responds with the
    token credentials form-encoded, as the spec requires.
    """
    oauth_request = get_oauth_request(request)

    missing_params = require_params(oauth_request, ('oauth_callback',))
    if missing_params is not None:
        return missing_params

    try:
        consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
    except InvalidConsumerError:
        return HttpResponseBadRequest('Invalid Consumer.')

    if not verify_oauth_request(request, oauth_request, consumer):
        return HttpResponseBadRequest('Could not verify OAuth request.')

    request_token = store.create_request_token(request, oauth_request, consumer,
                                               oauth_request['oauth_callback'])

    body = urlencode({
        'oauth_token': request_token.key,
        'oauth_token_secret': request_token.secret,
        'oauth_callback_confirmed': 'true'
    })
    return HttpResponse(body, content_type='application/x-www-form-urlencoded')
@login_required
def authorize_request_token(request, form_class=AuthorizeRequestTokenForm, template_name='core/api/auth/authorize', verification_template_name='core/api/auth/authorize_verification_code'):
    """OAuth 1.0a step 2: let the logged-in user approve a request token.

    GET renders the authorization form; POST with an approval either
    redirects back to the consumer's callback or, for out-of-band ('oob')
    consumers, displays the verification code. An invalid or unapproved
    POST falls through to re-render the form.
    """
    if 'oauth_token' not in request.REQUEST:
        return HttpResponseBadRequest('No request token specified.')

    oauth_request = get_oauth_request(request)
    try:
        request_token = store.fetch_request_token(request, oauth_request, request.REQUEST['oauth_token'])
    except InvalidTokenError:
        return HttpResponseBadRequest('Invalid request token.')

    consumer = store.get_consumer_for_request_token(request, oauth_request, request_token)

    if request.method == 'POST':
        form = form_class(request.POST)
        if form.is_valid() and form.cleaned_data['authorize_access']:
            request_token = store.authorize_request_token(request, oauth_request, request_token)
            # Consumers with a real callback get redirected back with the token.
            if request_token.callback is not None and request_token.callback != 'oob':
                domain = RequestSite(request).domain
                extra = urlencode({'oauth_token': request_token.key, 'domain': domain})
                return HttpResponseRedirect('%s&%s' % (request_token.get_callback_url(), extra))
            # Out-of-band consumers are shown the verifier to copy by hand.
            return render_to_response(verification_template_name,
                                      {'consumer': consumer, 'verification_code': request_token.verifier},
                                      context_instance=RequestContext(request), response_format='html')
    else:
        form = form_class(initial={'oauth_token': request_token.key})

    return render_to_response(template_name, {'consumer': consumer, 'form': form},
                              context_instance=RequestContext(request), response_format='html')
@csrf_exempt
def get_access_token(request):
    """OAuth 1.0a step 3: exchange an authorized request token + verifier
    for an access token.

    Returns the access token credentials form-encoded, or a 400 response
    when the consumer, token, signature or verifier is invalid.
    """
    oauth_request = get_oauth_request(request)

    missing_params = require_params(oauth_request, ('oauth_token', 'oauth_verifier'))
    if missing_params is not None:
        return missing_params

    # BUG FIX: the original single try/except mapped InvalidTokenError to
    # 'Invalid consumer.' and InvalidConsumerError to 'Invalid request
    # token.' -- the messages were crossed. Split the lookups so each error
    # reports the entity that actually failed (matching get_request_token).
    try:
        consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
    except InvalidConsumerError:
        return HttpResponseBadRequest('Invalid consumer.')
    try:
        request_token = store.get_request_token(request, oauth_request, oauth_request['oauth_token'])
    except InvalidTokenError:
        return HttpResponseBadRequest('Invalid request token.')

    if not verify_oauth_request(request, oauth_request, consumer, request_token):
        return HttpResponseBadRequest('Could not verify OAuth request.')

    if oauth_request.get('oauth_verifier', None) != request_token.verifier:
        return HttpResponseBadRequest('Invalid OAuth verifier.')

    access_token = store.create_access_token(request, oauth_request, consumer, request_token)

    ret = urlencode({
        'oauth_token': access_token.key,
        'oauth_token_secret': access_token.secret
    })
    return HttpResponse(ret, content_type='application/x-www-form-urlencoded')
|
{
"content_hash": "06e96874bfb8b4a43f56569fcd1704ea",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 188,
"avg_line_length": 43.666666666666664,
"alnum_prop": 0.7081788440567066,
"repo_name": "alejo8591/maker",
"id": "ee44d6439a9472880f2fed025ead748c6c414a38",
"size": "4637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/api/auth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1578070"
},
{
"name": "Perl",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2863599"
},
{
"name": "Shell",
"bytes": "3561"
}
],
"symlink_target": ""
}
|
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.imgmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['ntemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CML'
copyright = u'2017-2018 The CMATHL Team'
author = u'The CMATHL Team'
title = u'CML'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.10'
# The full version, including alpha/beta/rc tags.
release = u'1.10.4'
primary_domain = 'c'
numfig = True
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['build', 'Thumbs.db', '.DS_Store', 'include.rst', 'specfunc-*.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'display_version': True,
'prev_next_buttons_location': 'both'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['nstatic']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CMLdoc'
# -- Options for LaTeX output ---------------------------------------------
my_latex_preamble = '\\DeclareMathOperator\\arccosh{arccosh} \
\\DeclareMathOperator\\arcsinh{arcsinh} \
\\DeclareMathOperator\\arctanh{arctanh} \
\\DeclareMathOperator\\arcsec{arcsec} \
\\DeclareMathOperator\\arccsc{arccsc} \
\\DeclareMathOperator\\arccot{arccot} \
\\DeclareMathOperator\\csch{csch} \
\\DeclareMathOperator\\sech{sech} \
\\DeclareMathOperator\\arcsech{arcsech} \
\\DeclareMathOperator\\arccsch{arccsch} \
\\DeclareMathOperator\\arccoth{arccoth} \
\\DeclareMathOperator\\erf{erf} \
\\DeclareMathOperator\\erfc{erfc} \
\\DeclareMathOperator\\sgn{sgn} \
\\DeclareMathOperator\\sinc{sinc} \
\\DeclareMathOperator\\Var{Var} \
\\DeclareMathOperator\\diag{diag}'
my_latex_authors = 'Ulises Jeremias Cornejo Fandos'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': my_latex_preamble,
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cml-ref.tex', title,
my_latex_authors, 'manual'),
]
imgmath_latex_preamble = my_latex_preamble
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cml', title,
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cml-ref', title,
author, 'CML', 'One line description of project.',
'Miscellaneous'),
]
|
{
"content_hash": "b52a60c9c7d4be46fc88c950642db839",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 87,
"avg_line_length": 32.426829268292686,
"alnum_prop": 0.6350131628431741,
"repo_name": "CMATHL/cml",
"id": "b64462f9826eab4d7ad97383e6878b9013a26610",
"size": "5950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "260"
},
{
"name": "C",
"bytes": "381395"
},
{
"name": "C++",
"bytes": "19129"
},
{
"name": "CMake",
"bytes": "10266"
},
{
"name": "Objective-C",
"bytes": "435"
},
{
"name": "Shell",
"bytes": "1350"
}
],
"symlink_target": ""
}
|
import bluetooth
from ble_advertising import advertising_payload
from micropython import const
# BLE stack IRQ event codes (the subset this peripheral handles).
_IRQ_CENTRAL_CONNECT = const(1)
_IRQ_CENTRAL_DISCONNECT = const(2)
_IRQ_GATTS_WRITE = const(3)
# Nordic UART Service (NUS) UUIDs: the service itself plus its TX
# (peripheral -> central, notify) and RX (central -> peripheral, write)
# characteristics.
_UART_UUID = bluetooth.UUID("6E400001-B5A3-F393-E0A9-E50E24DCCA9E")
_UART_TX = (
    bluetooth.UUID("6E400003-B5A3-F393-E0A9-E50E24DCCA9E"),
    bluetooth.FLAG_NOTIFY,
)
_UART_RX = (
    bluetooth.UUID("6E400002-B5A3-F393-E0A9-E50E24DCCA9E"),
    bluetooth.FLAG_WRITE,
)
_UART_SERVICE = (
    _UART_UUID,
    (_UART_TX, _UART_RX),
)
# org.bluetooth.characteristic.gap.appearance.xml
_ADV_APPEARANCE_GENERIC_COMPUTER = const(128)
class BLEUART:
    """Peripheral side of the Nordic UART Service: buffers incoming writes
    and pushes outgoing data to connected centrals via notifications."""

    def __init__(self, ble, name="mpy-uart", rxbuf=100):
        self._ble = ble
        self._ble.active(True)
        self._ble.irq(self._irq)
        handles = self._ble.gatts_register_services((_UART_SERVICE,))
        ((self._tx_handle, self._rx_handle),) = handles
        # Enlarge the RX characteristic's buffer and switch it to append
        # mode so back-to-back writes accumulate rather than overwrite.
        self._ble.gatts_set_buffer(self._rx_handle, rxbuf, True)
        self._connections = set()
        self._rx_buffer = bytearray()
        self._handler = None
        # Advertising services=[_UART_UUID] as well tends to overflow the
        # payload, so only name and appearance are advertised.
        self._payload = advertising_payload(
            name=name, appearance=_ADV_APPEARANCE_GENERIC_COMPUTER
        )
        self._advertise()

    def irq(self, handler):
        """Install a callback invoked whenever new RX data arrives."""
        self._handler = handler

    def _irq(self, event, data):
        # Connections are tracked so write() knows whom to notify.
        if event == _IRQ_CENTRAL_CONNECT:
            handle, _, _ = data
            self._connections.add(handle)
        elif event == _IRQ_CENTRAL_DISCONNECT:
            handle, _, _ = data
            if handle in self._connections:
                self._connections.remove(handle)
                # Resume advertising so another central can connect.
                self._advertise()
        elif event == _IRQ_GATTS_WRITE:
            handle, value_handle = data
            if handle in self._connections and value_handle == self._rx_handle:
                self._rx_buffer += self._ble.gatts_read(self._rx_handle)
                if self._handler:
                    self._handler()

    def any(self):
        """Number of buffered RX bytes available to read."""
        return len(self._rx_buffer)

    def read(self, sz=None):
        """Consume and return up to *sz* bytes (all buffered data if
        *sz* is None or 0)."""
        count = sz or len(self._rx_buffer)
        data, self._rx_buffer = self._rx_buffer[0:count], self._rx_buffer[count:]
        return data

    def write(self, data):
        """Notify *data* to every connected central."""
        for handle in self._connections:
            self._ble.gatts_notify(handle, self._tx_handle, data)

    def close(self):
        """Disconnect all centrals and forget them."""
        for handle in self._connections:
            self._ble.gap_disconnect(handle)
        self._connections.clear()

    def _advertise(self, interval_us=500000):
        self._ble.gap_advertise(interval_us, adv_data=self._payload)
def demo():
    """Run a BLE UART peripheral that echoes received lines to the console
    and streams a repeating number sequence to the central once a second."""
    import time

    ble = bluetooth.BLE()
    uart = BLEUART(ble)

    def on_rx():
        print("rx: ", uart.read().decode().strip())

    uart.irq(handler=on_rx)
    values = [4, 8, 15, 16, 23, 42]
    idx = 0
    try:
        while True:
            uart.write(str(values[idx]) + "\n")
            idx = (idx + 1) % len(values)
            time.sleep_ms(1000)
    except KeyboardInterrupt:
        pass
    uart.close()
# Run the demo only when executed directly, not on import.
if __name__ == "__main__":
    demo()
|
{
"content_hash": "9289a6819877a0e9c020f80e7af3e069",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 99,
"avg_line_length": 29.901785714285715,
"alnum_prop": 0.5962974022096148,
"repo_name": "pramasoul/micropython",
"id": "6d167a871a75e9b0ffddd34286fd687949d82ff9",
"size": "3435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/bluetooth/ble_uart_peripheral.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "55179"
},
{
"name": "C",
"bytes": "35133638"
},
{
"name": "C++",
"bytes": "703228"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "75592"
},
{
"name": "Objective-C",
"bytes": "391937"
},
{
"name": "Python",
"bytes": "588844"
},
{
"name": "Shell",
"bytes": "4829"
}
],
"symlink_target": ""
}
|
"""
Amazon EC2 driver
"""
from libcloud.providers import Provider
from libcloud.types import NodeState, InvalidCredsException
from libcloud.base import Node, Response, ConnectionUserAndKey
from libcloud.base import NodeDriver, NodeSize, NodeImage, NodeLocation
import base64
import hmac
from hashlib import sha256
import time
import urllib
from xml.etree import ElementTree as ET
# Regional EC2 API endpoints.
EC2_US_EAST_HOST = 'ec2.us-east-1.amazonaws.com'
EC2_US_WEST_HOST = 'ec2.us-west-1.amazonaws.com'
EC2_EU_WEST_HOST = 'ec2.eu-west-1.amazonaws.com'
EC2_AP_SOUTHEAST_HOST = 'ec2.ap-southeast-1.amazonaws.com'
# EC2 Query API version and the matching XML namespace used when parsing
# responses with ElementTree.
API_VERSION = '2009-11-30'
NAMESPACE = "http://ec2.amazonaws.com/doc/%s/" % (API_VERSION)
"""
Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
From http://aws.amazon.com/ec2/instance-types/
"""
EC2_INSTANCE_TYPES = {
'm1.small': {
'id': 'm1.small',
'name': 'Small Instance',
'ram': 1740,
'disk': 160,
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Large Instance',
'ram': 7680,
'disk': 850,
'bandwidth': None
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Extra Large Instance',
'ram': 15360,
'disk': 1690,
'bandwidth': None
},
'c1.medium': {
'id': 'c1.medium',
'name': 'High-CPU Medium Instance',
'ram': 1740,
'disk': 350,
'bandwidth': None
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'High-CPU Extra Large Instance',
'ram': 7680,
'disk': 1690,
'bandwidth': None
},
'm2.xlarge': {
'id': 'm2.xlarge',
'name': 'High-Memory Extra Large Instance',
'ram': 17510,
'disk': 420,
'bandwidth': None
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High-Memory Double Extra Large Instance',
'ram': 35021,
'disk': 850,
'bandwidth': None
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High-Memory Quadruple Extra Large Instance',
'ram': 70042,
'disk': 1690,
'bandwidth': None
},
}
EC2_US_EAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES)
EC2_US_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES)
EC2_EU_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES)
EC2_AP_SOUTHEAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES)
#
# On demand prices must also be hardcoded, because Amazon doesn't provide an
# API to fetch them. From http://aws.amazon.com/ec2/pricing/
#
EC2_US_EAST_INSTANCE_TYPES['m1.small']['price'] = '.085'
EC2_US_EAST_INSTANCE_TYPES['m1.large']['price'] = '.34'
EC2_US_EAST_INSTANCE_TYPES['m1.xlarge']['price'] = '.68'
EC2_US_EAST_INSTANCE_TYPES['c1.medium']['price'] = '.17'
EC2_US_EAST_INSTANCE_TYPES['c1.xlarge']['price'] = '.68'
EC2_US_EAST_INSTANCE_TYPES['m2.xlarge']['price'] = '.50'
EC2_US_EAST_INSTANCE_TYPES['m2.2xlarge']['price'] = '1.2'
EC2_US_EAST_INSTANCE_TYPES['m2.4xlarge']['price'] = '2.4'
EC2_US_WEST_INSTANCE_TYPES['m1.small']['price'] = '.095'
EC2_US_WEST_INSTANCE_TYPES['m1.large']['price'] = '.38'
EC2_US_WEST_INSTANCE_TYPES['m1.xlarge']['price'] = '.76'
EC2_US_WEST_INSTANCE_TYPES['c1.medium']['price'] = '.19'
EC2_US_WEST_INSTANCE_TYPES['c1.xlarge']['price'] = '.76'
EC2_US_EAST_INSTANCE_TYPES['m2.xlarge']['price'] = '.57'
EC2_US_WEST_INSTANCE_TYPES['m2.2xlarge']['price'] = '1.34'
EC2_US_WEST_INSTANCE_TYPES['m2.4xlarge']['price'] = '2.68'
EC2_EU_WEST_INSTANCE_TYPES['m1.small']['price'] = '.095'
EC2_EU_WEST_INSTANCE_TYPES['m1.large']['price'] = '.38'
EC2_EU_WEST_INSTANCE_TYPES['m1.xlarge']['price'] = '.76'
EC2_EU_WEST_INSTANCE_TYPES['c1.medium']['price'] = '.19'
EC2_EU_WEST_INSTANCE_TYPES['c1.xlarge']['price'] = '.76'
EC2_US_EAST_INSTANCE_TYPES['m2.xlarge']['price'] = '.57'
EC2_EU_WEST_INSTANCE_TYPES['m2.2xlarge']['price'] = '1.34'
EC2_EU_WEST_INSTANCE_TYPES['m2.4xlarge']['price'] = '2.68'
# prices are the same
EC2_AP_SOUTHEAST_INSTANCE_TYPES = dict(EC2_EU_WEST_INSTANCE_TYPES)
class EC2Response(Response):
    """
    EC2 specific response parsing and error handling.
    """
    def parse_body(self):
        """Parse the raw XML body into an ElementTree element.

        Returns None when the response has no body.
        """
        if not self.body:
            return None
        return ET.XML(self.body)
    def parse_error(self):
        """Collect all <Errors/Error> entries into one message string.

        Raises InvalidCredsException immediately for any error code that
        indicates bad credentials or a missing service opt-in.
        """
        err_list = []
        # Okay, so for Eucalyptus, you can get a 403, with no body,
        # if you are using the wrong user/password.
        msg = "Failure: 403 Forbidden"
        if self.status == 403 and self.body[:len(msg)] == msg:
            raise InvalidCredsException(msg)
        for err in ET.XML(self.body).findall('Errors/Error'):
            # Each Error element has exactly two children: Code, Message.
            code, message = err.getchildren()
            err_list.append("%s: %s" % (code.text, message.text))
            if code.text == "InvalidClientTokenId":
                raise InvalidCredsException(err_list[-1])
            if code.text == "SignatureDoesNotMatch":
                raise InvalidCredsException(err_list[-1])
            if code.text == "AuthFailure":
                raise InvalidCredsException(err_list[-1])
            if code.text == "OptInRequired":
                raise InvalidCredsException(err_list[-1])
        return "\n".join(err_list)
class EC2Connection(ConnectionUserAndKey):
    """
    Represents a single connection to the EC2 Endpoint
    """
    host = EC2_US_EAST_HOST
    responseCls = EC2Response
    def add_default_params(self, params):
        """Attach the auth/version query parameters required by every
        EC2 Query API call, including the SigV2 signature."""
        params['SignatureVersion'] = '2'
        params['SignatureMethod'] = 'HmacSHA256'
        params['AWSAccessKeyId'] = self.user_id
        params['Version'] = API_VERSION
        params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                            time.gmtime())
        # NOTE(review): self.action is presumably set by the base class
        # before this hook runs -- confirm against libcloud.base.
        params['Signature'] = self._get_aws_auth_param(params, self.key, self.action)
        return params
    def _get_aws_auth_param(self, params, secret_key, path='/'):
        """
        Creates the signature required for AWS, per
        http://bit.ly/aR7GaQ [docs.amazonwebservices.com]:
        StringToSign = HTTPVerb + "\n" +
                       ValueOfHostHeaderInLowercase + "\n" +
                       HTTPRequestURI + "\n" +
                       CanonicalizedQueryString <from the preceding step>
        """
        # Python 2 only: dict.keys() returns a list here; the params are
        # sorted lexicographically as SigV2 canonicalization requires.
        keys = params.keys()
        keys.sort()
        pairs = []
        for key in keys:
            pairs.append(urllib.quote(key, safe='') + '=' +
                         urllib.quote(params[key], safe='-_~'))
        qs = '&'.join(pairs)
        string_to_sign = '\n'.join(('GET', self.host, path, qs))
        b64_hmac = base64.b64encode(
            hmac.new(secret_key, string_to_sign, digestmod=sha256).digest()
        )
        return b64_hmac
class EC2NodeDriver(NodeDriver):
    """
    Amazon EC2 node driver
    """
    connectionCls = EC2Connection
    type = Provider.EC2
    name = 'Amazon EC2 (us-east-1)'
    path = '/'
    _instance_types = EC2_US_EAST_INSTANCE_TYPES
    # Maps EC2 instance states onto libcloud's NodeState values; anything
    # not listed becomes NodeState.UNKNOWN in _to_node.
    NODE_STATE_MAP = {
        'pending': NodeState.PENDING,
        'running': NodeState.RUNNING,
        'shutting-down': NodeState.TERMINATED,
        'terminated': NodeState.TERMINATED
    }
    def _findtext(self, element, xpath):
        """Return the text of the first element matching *xpath*."""
        return element.findtext(self._fixxpath(xpath))
    def _fixxpath(self, xpath):
        # ElementTree wants namespaces in its xpaths, so here we add them.
        return "/".join(["{%s}%s" % (NAMESPACE, e) for e in xpath.split("/")])
    def _findattr(self, element, xpath):
        # NOTE(review): identical to _findtext; presumably kept as a
        # separate name for call-site readability.
        return element.findtext(self._fixxpath(xpath))
    def _findall(self, element, xpath):
        """findall with the EC2 namespace applied to each path step."""
        return element.findall(self._fixxpath(xpath))
    def _pathlist(self, key, arr):
        """
        Converts a key and an array of values into AWS query param format.
        """
        # e.g. ('InstanceId', [a, b]) -> {'InstanceId.1': a, 'InstanceId.2': b}
        params = {}
        i = 0
        for value in arr:
            i += 1
            params["%s.%s" % (key, i)] = value
        return params
    def _get_boolean(self, element):
        """True iff the response's <return> element contains 'true'."""
        tag = "{%s}%s" % (NAMESPACE, 'return')
        return element.findtext(tag) == 'true'
    def _get_terminate_boolean(self, element):
        """True iff the instance's reported state is a terminating one."""
        status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name'))
        return any([ term_status == status
                     for term_status
                     in ('shutting-down', 'terminated') ])
    def _to_nodes(self, object, xpath, groups=None):
        """Convert every instance element under *xpath* into a Node."""
        return [ self._to_node(el, groups=groups)
                 for el in object.findall(self._fixxpath(xpath)) ]
    def _to_node(self, element, groups=None):
        """Build a libcloud Node from a single EC2 instance element."""
        try:
            state = self.NODE_STATE_MAP[
                self._findattr(element, "instanceState/name")
            ]
        except KeyError:
            # Unrecognized EC2 state string.
            state = NodeState.UNKNOWN
        n = Node(
            id=self._findtext(element, 'instanceId'),
            name=self._findtext(element, 'instanceId'),
            state=state,
            public_ip=[self._findtext(element, 'dnsName')],
            private_ip=[self._findtext(element, 'privateDnsName')],
            driver=self.connection.driver,
            extra={
                'dns_name': self._findattr(element, "dnsName"),
                'instanceId': self._findattr(element, "instanceId"),
                'imageId': self._findattr(element, "imageId"),
                'private_dns': self._findattr(element, "privateDnsName"),
                'status': self._findattr(element, "instanceState/name"),
                'keyname': self._findattr(element, "keyName"),
                'launchindex': self._findattr(element, "amiLaunchIndex"),
                'productcode':
                    [p.text for p in self._findall(
                        element, "productCodesSet/item/productCode"
                     )],
                'instancetype': self._findattr(element, "instanceType"),
                'launchdatetime': self._findattr(element, "launchTime"),
                'availability': self._findattr(element,
                                               "placement/availabilityZone"),
                'kernelid': self._findattr(element, "kernelId"),
                'ramdiskid': self._findattr(element, "ramdiskId"),
                'groups': groups
            }
        )
        return n
    def _to_images(self, object):
        """Convert a DescribeImages response into NodeImage objects."""
        return [ self._to_image(el)
                 for el in object.findall(
                     self._fixxpath('imagesSet/item')
                 ) ]
    def _to_image(self, element):
        n = NodeImage(id=self._findtext(element, 'imageId'),
                      name=self._findtext(element, 'imageLocation'),
                      driver=self.connection.driver)
        return n
    def list_nodes(self):
        """List all instances, one Node per instance, with the security
        groups of the enclosing reservation attached as extra data."""
        params = {'Action': 'DescribeInstances' }
        elem=self.connection.request(self.path, params=params).object
        nodes=[]
        for rs in self._findall(elem, 'reservationSet/item'):
            # NOTE(review): findtext('') reads the element's own text --
            # confirm this actually yields the group id for groupId nodes.
            groups=[g.findtext('')
                    for g in self._findall(rs, 'groupSet/item/groupId')]
            nodes += self._to_nodes(rs, 'instancesSet/item', groups)
        return nodes
    def list_sizes(self, location=None):
        """Return the hardcoded per-region size table as NodeSize objects."""
        return [ NodeSize(driver=self.connection.driver, **i)
                 for i in self._instance_types.values() ]
    def list_images(self, location=None):
        """List all AMIs visible to this account."""
        params = {'Action': 'DescribeImages'}
        images = self._to_images(
            self.connection.request(self.path, params=params).object
        )
        return images
    def ex_create_security_group(self, name, description):
        """Creates a new Security Group
        @note: This is a non-standard extension API, and only works for EC2.
        @type name: C{str}
        @param name: The name of the security group to Create. This must be unique.
        @type description: C{str}
        @param description: Human readable description of a Security Group.
        """
        params = {'Action': 'CreateSecurityGroup',
                  'GroupName': name,
                  'GroupDescription': description}
        return self.connection.request(self.path, params=params).object
    def ex_authorize_security_group_permissive(self, name):
        """Edit a Security Group to allow all traffic.
        @note: This is a non-standard extension API, and only works for EC2.
        @type name: C{str}
        @param name: The name of the security group to edit

        Opens TCP and UDP ports 0-65535 and all ICMP types from 0.0.0.0/0.
        Already-present rules (InvalidPermission.Duplicate) are ignored.
        """
        results = []
        params = {'Action': 'AuthorizeSecurityGroupIngress',
                  'GroupName': name,
                  'IpProtocol': 'tcp',
                  'FromPort': '0',
                  'ToPort': '65535',
                  'CidrIp': '0.0.0.0/0'}
        # Python 2 only: "except Exception, e" syntax.
        try:
            results.append(
                self.connection.request(self.path, params=params.copy()).object
            )
        except Exception, e:
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e
        params['IpProtocol'] = 'udp'
        try:
            results.append(
                self.connection.request(self.path, params=params.copy()).object
            )
        except Exception, e:
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e
        # ICMP uses type/code instead of ports; -1/-1 means "all".
        params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'})
        try:
            results.append(
                self.connection.request(self.path, params=params.copy()).object
            )
        except Exception, e:
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e
        return results
    def create_node(self, **kwargs):
        """Create a new EC2 node
        See L{NodeDriver.create_node} for more keyword args.
        Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com]
        @keyword    ex_mincount: Minimum number of instances to launch
        @type       ex_mincount: C{int}
        @keyword    ex_maxcount: Maximum number of instances to launch
        @type       ex_maxcount: C{int}
        @keyword    ex_securitygroup: Name of security group
        @type       ex_securitygroup: C{str}
        @keyword    ex_keyname: The name of the key pair
        @type       ex_keyname: C{str}
        @keyword    ex_userdata: User data
        @type       ex_userdata: C{str}
        """
        image = kwargs["image"]
        size = kwargs["size"]
        params = {
            'Action': 'RunInstances',
            'ImageId': image.id,
            'MinCount': kwargs.get('ex_mincount','1'),
            'MaxCount': kwargs.get('ex_maxcount','1'),
            'InstanceType': size.id
        }
        if 'ex_securitygroup' in kwargs:
            # Accept either a single group name or a list of them.
            if not isinstance(kwargs['ex_securitygroup'], list):
                kwargs['ex_securitygroup'] = [kwargs['ex_securitygroup']]
            for sig in range(len(kwargs['ex_securitygroup'])):
                params['SecurityGroup.%d' % (sig+1,)] = kwargs['ex_securitygroup'][sig]
        if 'ex_keyname' in kwargs:
            params['KeyName'] = kwargs['ex_keyname']
        if 'ex_userdata' in kwargs:
            # EC2 requires user data to be base64-encoded.
            params['UserData'] = base64.b64encode(kwargs['ex_userdata'])
        object = self.connection.request(self.path, params=params).object
        nodes = self._to_nodes(object, 'instancesSet/item')
        # A single launched instance is returned bare, not in a list.
        if len(nodes) == 1:
            return nodes[0]
        else:
            return nodes
    def reboot_node(self, node):
        """
        Reboot the node by passing in the node object
        """
        params = {'Action': 'RebootInstances'}
        params.update(self._pathlist('InstanceId', [node.id]))
        res = self.connection.request(self.path, params=params).object
        return self._get_boolean(res)
    def destroy_node(self, node):
        """
        Destroy node by passing in the node object
        """
        params = {'Action': 'TerminateInstances'}
        params.update(self._pathlist('InstanceId', [node.id]))
        res = self.connection.request(self.path, params=params).object
        return self._get_terminate_boolean(res)
    def list_locations(self):
        """Single-region driver: one fixed location."""
        return [NodeLocation(0, 'Amazon US N. Virginia', 'US', self)]
class EC2EUConnection(EC2Connection):
    """
    Connection class for EC2 in the Western Europe Region

    Identical to EC2Connection except for the regional endpoint host.
    """
    host = EC2_EU_WEST_HOST
class EC2EUNodeDriver(EC2NodeDriver):
    """EC2 node driver for the eu-west-1 (Ireland) region; reuses the base
    driver with region-specific connection, name and price table."""
    name = 'Amazon EC2 (eu-west-1)'
    connectionCls = EC2EUConnection
    _instance_types = EC2_EU_WEST_INSTANCE_TYPES

    def list_locations(self):
        """Single-region driver: return the one fixed location."""
        location = NodeLocation(0, 'Amazon Europe Ireland', 'IE', self)
        return [location]
class EC2USWestConnection(EC2Connection):
    """
    Connection class for EC2 in the Western US Region

    Identical to EC2Connection except for the regional endpoint host.
    """
    host = EC2_US_WEST_HOST
class EC2USWestNodeDriver(EC2NodeDriver):
    """EC2 node driver for the us-west-1 (N. California) region; reuses the
    base driver with region-specific connection, name and price table."""
    name = 'Amazon EC2 (us-west-1)'
    connectionCls = EC2USWestConnection
    _instance_types = EC2_US_WEST_INSTANCE_TYPES

    def list_locations(self):
        """Single-region driver: return the one fixed location."""
        location = NodeLocation(0, 'Amazon US N. California', 'US', self)
        return [location]
class EC2APSEConnection(EC2Connection):
    """
    Connection class for EC2 in the Southeast Asia Pacific Region

    Identical to EC2Connection except for the regional endpoint host.
    """
    host = EC2_AP_SOUTHEAST_HOST
class EC2APSENodeDriver(EC2NodeDriver):
    """EC2 node driver for the ap-southeast-1 (Singapore) region; reuses the
    base driver with region-specific connection, name and price table."""
    name = 'Amazon EC2 (ap-southeast-1)'
    connectionCls = EC2APSEConnection
    _instance_types = EC2_AP_SOUTHEAST_INSTANCE_TYPES

    def list_locations(self):
        """Single-region driver: return the one fixed location."""
        location = NodeLocation(0, 'Amazon Asia-Pacific Singapore', 'SG', self)
        return [location]
class EucConnection(EC2Connection):
    """
    Connection class for Eucalyptus

    The host is supplied at driver construction time rather than being a
    fixed AWS endpoint, hence the None default.
    """
    host = None
class EucNodeDriver(EC2NodeDriver):
    """
    Driver class for Eucalyptus

    Speaks the EC2 Query API against a user-supplied Eucalyptus endpoint.
    """
    name = 'Eucalyptus'
    connectionCls = EucConnection
    _instance_types = EC2_US_WEST_INSTANCE_TYPES

    def __init__(self, key, secret=None, secure=True, host=None, path=None, port=None):
        # `path` is consumed here (stored on the driver) and deliberately
        # not forwarded to the parent constructor.
        super(EucNodeDriver, self).__init__(key, secret, secure, host, port)
        if path is None:
            path = "/services/Eucalyptus"
        self.path = path

    def list_locations(self):
        """Eucalyptus has no location concept in this driver."""
        # Bug fix: use the call form of raise, which is valid on both
        # Python 2 and 3; the old "raise E, msg" comma form is a
        # SyntaxError under Python 3.
        raise NotImplementedError(
            'list_locations not implemented for this driver')
|
{
"content_hash": "bf143c927143bf009a4146c39544dddb",
"timestamp": "",
"source": "github",
"line_count": 536,
"max_line_length": 88,
"avg_line_length": 34.026119402985074,
"alnum_prop": 0.5785722118653361,
"repo_name": "secondstory/dewpoint",
"id": "d5092ed18749b6220bf9025ac0e13cc355c812fd",
"size": "19020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libcloud/drivers/ec2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "214463"
}
],
"symlink_target": ""
}
|
"""urlconf for the base application"""
from django.conf.urls import url, patterns
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8
# and removed in 1.10 -- confirm this project's pinned Django version.
urlpatterns = patterns('base.views',
    #url(r'^$', 'home', name='home'),
)
|
{
"content_hash": "92dcea94c4b6159e8512a9d4066e3f23",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 20.25,
"alnum_prop": 0.6604938271604939,
"repo_name": "jammons/prioritize",
"id": "3638e1abafd17a78e26c644dd35d80630a8fa062",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "161654"
},
{
"name": "JavaScript",
"bytes": "201023"
},
{
"name": "Python",
"bytes": "36088"
},
{
"name": "Ruby",
"bytes": "1470"
},
{
"name": "Shell",
"bytes": "7558"
}
],
"symlink_target": ""
}
|
"""Tests for question domain objects."""
from core.domain import exp_domain
from core.domain import question_domain
from core.tests import test_utils
import utils
class QuestionDomainTest(test_utils.GenericTestBase):
    """Tests for Question domain object."""
    def test_to_dict(self):
        """A Question serializes back to the dict it was built from."""
        expected_object = {
            'question_id': 'col1.random',
            'title': 'abc',
            'question_data': {},
            'question_data_schema_version': 1,
            'collection_id': 'col1',
            'language_code': 'en'
        }
        observed_object = question_domain.Question(
            expected_object['question_id'], expected_object['title'],
            expected_object['question_data'],
            expected_object['question_data_schema_version'],
            expected_object['collection_id'], expected_object['language_code'])
        self.assertDictEqual(expected_object, observed_object.to_dict())
    def test_validation(self):
        """Test to verify validate method of Question domain object."""
        state = exp_domain.State.create_default_state('ABC')
        question_data = state.to_dict()
        test_object = {
            'question_id': 'col1.random',
            'title': 'abc',
            'question_data': question_data,
            'question_data_schema_version': 1,
            'collection_id': 'col1',
            'language_code': 'en'
        }
        question = question_domain.Question(
            test_object['question_id'], test_object['title'],
            test_object['question_data'],
            test_object['question_data_schema_version'],
            test_object['collection_id'], test_object['language_code'])
        # Each field below is corrupted in turn, the expected validation
        # error is asserted, then the field is restored before moving on.
        question.question_id = 123
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected ID to be a string')):
            question.validate()
        question.question_id = 'col1.random'
        question.update_title(1)
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected title to be a string')):
            question.validate()
        question.update_title('ABC')
        question.update_question_data([])
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected question_data to be a dict')):
            question.validate()
        question.update_question_data(question_data)
        question.question_data_schema_version = 'abc'
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected question_data_schema_version to be a integer')):
            question.validate()
        question.question_data_schema_version = 1
        question.collection_id = 123
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected collection_id to be a string')):
            question.validate()
        question.collection_id = 'col1'
        question.language_code = 123
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Expected language_code to be a string')):
            question.validate()
        # A string that is not a recognized language code is also invalid.
        question.update_language_code('abc')
        with self.assertRaisesRegexp(utils.ValidationError, (
            'Invalid language code')):
            question.validate()
    def test_from_dict(self):
        """from_dict followed by to_dict round-trips the payload."""
        state = exp_domain.State.create_default_state('ABC')
        question_data = state.to_dict()
        expected_object = {
            'question_id': 'col1.random',
            'title': 'abc',
            'question_data': question_data,
            'question_data_schema_version': 1,
            'collection_id': 'col1',
            'language_code': 'en'
        }
        question = question_domain.Question.from_dict(expected_object)
        self.assertDictEqual(expected_object, question.to_dict())
    def test_create_default_question(self):
        """Test to verify create_default_question method of Question domain
        object."""
        question_id = 'col1.random'
        collection_id = 'col1'
        title = ''
        language_code = 'en'
        question = question_domain.Question.create_default_question(
            question_id, collection_id, title, language_code)
        self.assertEqual(question.question_id, question_id)
        self.assertEqual(question.collection_id, collection_id)
        self.assertEqual(question.question_data_schema_version, 1)
        self.assertEqual(question.question_data, {})
        self.assertEqual(question.title, '')
        self.assertEqual(question.language_code, 'en')
    def test_update_methods(self):
        """Tests update_title, update_question_data and update_language_code
        methods of the question domain object."""
        state = exp_domain.State.create_default_state('ABC')
        question_data = state.to_dict()
        test_object = {
            'question_id': 'col1.random',
            'title': 'abc',
            'question_data': question_data,
            'question_data_schema_version': 1,
            'collection_id': 'col1',
            'language_code': 'en'
        }
        question = question_domain.Question.from_dict(test_object)
        question.update_title('hello')
        self.assertEqual(question.title, 'hello')
        question.update_question_data({})
        self.assertEqual(question.question_data, {})
        question.update_language_code('es')
        self.assertEqual(question.language_code, 'es')
|
{
"content_hash": "631cbf614acf3e709acd1b20af0c6b8e",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 36.605442176870746,
"alnum_prop": 0.6056495075264821,
"repo_name": "MAKOSCAFEE/oppia",
"id": "5bc32df244c36e9ae122d8c1e6d1e95331ee2093",
"size": "6003",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "core/domain/question_domain_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "101321"
},
{
"name": "HTML",
"bytes": "900382"
},
{
"name": "JavaScript",
"bytes": "2922781"
},
{
"name": "Python",
"bytes": "3701239"
},
{
"name": "Shell",
"bytes": "47818"
}
],
"symlink_target": ""
}
|
from direct.distributed.DistributedObject import DistributedObject
from toontown.catalog.CatalogItemList import CatalogItemList
import time
class CatalogManager(DistributedObject):
    """Client-side singleton distributed object that relays catalog
    requests to the server and caches the popular-items list."""
    notify = directNotify.newCategory('CatalogManager')
    neverDisable = 1

    def __init__(self, cr):
        DistributedObject.__init__(self, cr)
        # CatalogItemList once setPopularItems arrives; None until then.
        self.popularItems = None

    def generate(self):
        """Install this object as the repository's catalog manager."""
        # Idiom fix: compare against None with "is not", not "!=".
        # Tear down any stale manager left from a previous generate.
        if base.cr.catalogManager is not None:
            base.cr.catalogManager.delete()
        base.cr.catalogManager = self
        DistributedObject.generate(self)
        # A next-time of 0 means no catalog was ever scheduled for this
        # avatar, so request the first one now.
        if hasattr(base.localAvatar, 'catalogScheduleNextTime') and base.localAvatar.catalogScheduleNextTime == 0:
            self.d_startCatalog()

    def disable(self):
        base.cr.catalogManager = None
        DistributedObject.disable(self)

    def delete(self):
        base.cr.catalogManager = None
        DistributedObject.delete(self)

    def d_startCatalog(self):
        # 'd_' prefix: sends the update to the server-side counterpart.
        self.sendUpdate('startCatalog')

    def fetchPopularItems(self):
        """Ask the server for the current popular-items list."""
        self.sendUpdate('fetchPopularItems')

    def setPopularItems(self, popularItems):
        """Server callback: cache the list and notify listeners."""
        self.popularItems = CatalogItemList(popularItems)
        messenger.send('PopularItemsSet')
|
{
"content_hash": "19dedc0fb8e7cc9b8fb5e7723f832a70",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 114,
"avg_line_length": 30.923076923076923,
"alnum_prop": 0.6965174129353234,
"repo_name": "Spiderlover/Toontown",
"id": "6d9d51e2e0f0a57c565145665463b7edc3aa8299",
"size": "1206",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "toontown/catalog/CatalogManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from botocore.client import ClientError
from collections import Counter
from concurrent.futures import as_completed
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc
import logging
import itertools
import time
from c7n.actions import Action, ActionRegistry
from c7n.filters import (
FilterRegistry, ValueFilter, AgeFilter, Filter, FilterValidationError,
OPERATORS)
from c7n.filters.offhours import OffHour, OnHour
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n import query
from c7n.tags import TagActionFilter, DEFAULT_TAG, TagCountFilter, TagTrim
from c7n.utils import (
local_session, type_schema, chunks, get_retry, worker)
log = logging.getLogger('custodian.asg')
# Registries populated by the @filters.register / @actions.register
# decorators below and wired into the ASG resource manager.
filters = FilterRegistry('asg.filters')
actions = ActionRegistry('asg.actions')
# Generic cross-resource filters that work on ASGs without adaptation.
filters.register('offhour', OffHour)
filters.register('onhour', OnHour)
filters.register('tag-count', TagCountFilter)
filters.register('marked-for-op', TagActionFilter)
@resources.register('asg')
class ASG(query.QueryResourceManager):
    """AWS Auto Scaling Group resource manager."""
    # Declarative description consumed by the generic query machinery:
    # which API to call, how to identify/filter results, and which fields
    # appear in reports.
    class resource_type(object):
        service = 'autoscaling'
        type = 'autoScalingGroup'
        id = name = 'AutoScalingGroupName'
        date = 'CreatedTime'
        dimension = 'AutoScalingGroupName'
        enum_spec = ('describe_auto_scaling_groups', 'AutoScalingGroups', None)
        filter_name = 'AutoScalingGroupNames'
        filter_type = 'list'
        config_type = 'AWS::AutoScaling::AutoScalingGroup'
        default_report_fields = (
            'AutoScalingGroupName',
            'CreatedTime',
            'LaunchConfigurationName',
            'count:Instances',
            'DesiredCapacity',
            'HealthCheckType',
            'list:LoadBalancerNames',
        )
    filter_registry = filters
    action_registry = actions
    # Retry throttled / in-use API responses with backoff.
    retry = staticmethod(get_retry(('ResourceInUse', 'Throttling',)))
class LaunchConfigFilterBase(object):
    """Mixin base class for querying asg launch configs."""

    permissions = ("autoscaling:DescribeLaunchConfigurations",)
    # Mapping of LaunchConfigurationName -> launch config dict, populated
    # by initialize().
    configs = None

    def initialize(self, asgs):
        """Get launch configs for the set of asgs.

        ASGs lacking a 'LaunchConfigurationName' key are removed from the
        input list in place before the configs are fetched.
        """
        # Per https://github.com/capitalone/cloud-custodian/issues/143
        # (the dead `config_names` set the original also built here was
        # never read and has been dropped).
        skip = [a for a in asgs if 'LaunchConfigurationName' not in a]
        for a in skip:
            asgs.remove(a)
        # Reset before (re)querying so stale data never survives a retry.
        self.configs = {}
        self.log.debug(
            "Querying launch configs for filter %s",
            self.__class__.__name__)
        configs = self.manager.get_resource_manager(
            'launch-config').resources()
        self.configs = {
            cfg['LaunchConfigurationName']: cfg for cfg in configs}
@filters.register('security-group')
class SecurityGroupFilter(
        net_filters.SecurityGroupFilter, LaunchConfigFilterBase):
    """Filter asgs by the security groups referenced in their launch configs."""

    RelatedIdsExpression = ""

    def get_permissions(self):
        return ("autoscaling:DescribeLaunchConfigurations",
                "ec2:DescribeSecurityGroups",)

    def get_related_ids(self, asgs):
        # Union of security group ids across all referenced launch configs.
        related = set()
        for group in asgs:
            launch_cfg = self.configs.get(group['LaunchConfigurationName'])
            for sg_id in launch_cfg.get('SecurityGroups', ()):
                related.add(sg_id)
        return related

    def process(self, asgs, event=None):
        # Resolve launch configs first so get_related_ids can consult them.
        self.initialize(asgs)
        return super(SecurityGroupFilter, self).process(asgs, event)
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
    """Filter asgs by the subnets named in their VPCZoneIdentifier."""

    RelatedIdsExpression = ""

    def get_related_ids(self, asgs):
        # VPCZoneIdentifier is a comma separated list of subnet ids.
        related = set()
        for group in asgs:
            zone_spec = group.get('VPCZoneIdentifier', '')
            for subnet_id in zone_spec.split(','):
                related.add(subnet_id.strip())
        return related
# The generic network-location filter works for asgs without adaptation.
filters.register('network-location', net_filters.NetworkLocation)
@filters.register('launch-config')
class LaunchConfigFilter(ValueFilter, LaunchConfigFilterBase):
    """Filter asg by launch config attributes.

    :example:

        .. code-block: yaml

            policies:
              - name: launch-config-public-ip
                resource: asg
                filters:
                  - type: launch-config
                    key: AssociatePublicIpAddress
                    value: true
    """
    schema = type_schema(
        'launch-config', rinherit=ValueFilter.schema)
    permissions = ("autoscaling:DescribeLaunchConfigurations",)

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(LaunchConfigFilter, self).process(asgs, event)

    def __call__(self, asg):
        # A launch config can be deleted while still referenced by the
        # asg; in that case we match against None.
        launch_cfg = self.configs.get(asg['LaunchConfigurationName'])
        return self.match(launch_cfg)
class ConfigValidFilter(Filter, LaunchConfigFilterBase):
    """Base filter checking an asg's references for structural validity.

    Gathers the account's subnets, security groups, key pairs, elbs,
    target groups, ebs snapshots and amis, then reports any dangling
    references held by an asg or its launch config.
    """

    def get_permissions(self):
        # Flatten the permissions of every resource manager consulted.
        # Bug fix: previously `chain([...])` (no star) yielded the
        # per-manager permission *lists* unflattened.
        return list(itertools.chain(*[
            self.manager.get_resource_manager(m).get_permissions()
            for m in ('subnet', 'security-group', 'key-pair', 'elb',
                      'app-elb-target-group', 'ebs-snapshot', 'ami')]))

    def validate(self):
        # This filter fans out many describe calls; refuse lambda modes.
        if self.manager.data.get('mode'):
            raise FilterValidationError(
                "invalid-config makes too many queries to be run in lambda")
        return self

    def initialize(self, asgs):
        """Resolve every reference population once, up front."""
        super(ConfigValidFilter, self).initialize(asgs)
        # pylint: disable=attribute-defined-outside-init
        self.subnets = self.get_subnets()
        self.security_groups = self.get_security_groups()
        self.key_pairs = self.get_key_pairs()
        self.elbs = self.get_elbs()
        self.appelb_target_groups = self.get_appelb_target_groups()
        self.snapshots = self.get_snapshots()
        self.images, self.image_snaps = self.get_images()

    def get_subnets(self):
        manager = self.manager.get_resource_manager('subnet')
        return set([s['SubnetId'] for s in manager.resources()])

    def get_security_groups(self):
        manager = self.manager.get_resource_manager('security-group')
        return set([s['GroupId'] for s in manager.resources()])

    def get_key_pairs(self):
        manager = self.manager.get_resource_manager('key-pair')
        return set([k['KeyName'] for k in manager.resources()])

    def get_elbs(self):
        manager = self.manager.get_resource_manager('elb')
        return set([e['LoadBalancerName'] for e in manager.resources()])

    def get_appelb_target_groups(self):
        manager = self.manager.get_resource_manager('app-elb-target-group')
        return set([a['TargetGroupArn'] for a in manager.resources()])

    def get_images(self):
        """Return (image ids, snapshot ids referenced by those images).

        Handles third party images which are not visible via the cached
        account describe and may disappear at any time.
        """
        manager = self.manager.get_resource_manager('ami')
        images = set()
        image_snaps = set()
        image_ids = list({lc['ImageId'] for lc in self.configs.values()})

        # Pull account images, we should be able to utilize cached values,
        # drawn down the image population to just images not in the account.
        account_images = [
            i for i in manager.resources() if i['ImageId'] in image_ids]
        account_image_ids = {i['ImageId'] for i in account_images}
        image_ids = [image_id for image_id in image_ids
                     if image_id not in account_image_ids]

        # To pull third party images, we explicitly use a describe
        # source without any cache.
        #
        # Can't use a config source since it won't have state for
        # third party ami, we auto propagate source normally, so we
        # explicitly pull a describe source. Can't use a cache either
        # as they're not in the account.
        while image_ids:
            try:
                amis = manager.get_source('describe').get_resources(
                    image_ids, cache=False)
                account_images.extend(amis)
                break
            except ClientError as e:
                # Prune the ids AWS reports as missing and retry.
                msg = e.response['Error']['Message']
                if e.response['Error']['Code'] != 'InvalidAMIID.NotFound':
                    raise
                for n in msg[msg.find('[') + 1: msg.find(']')].split(','):
                    image_ids.remove(n.strip())

        for a in account_images:
            images.add(a['ImageId'])
            # Capture any snapshots, images strongly reference their
            # snapshots, and some of these will be third party in the
            # case of a third party image.
            for bd in a.get('BlockDeviceMappings', ()):
                if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
                    continue
                image_snaps.add(bd['Ebs']['SnapshotId'].strip())
        return images, image_snaps

    def get_snapshots(self):
        manager = self.manager.get_resource_manager('ebs-snapshot')
        return set([s['SnapshotId'] for s in manager.resources()])

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(ConfigValidFilter, self).process(asgs, event)

    def get_asg_errors(self, asg):
        """Return a list of (error-code, offending-id) tuples for the asg."""
        errors = []
        subnets = asg.get('VPCZoneIdentifier', '').split(',')

        for subnet in subnets:
            subnet = subnet.strip()
            if subnet not in self.subnets:
                errors.append(('invalid-subnet', subnet))

        for elb in asg['LoadBalancerNames']:
            elb = elb.strip()
            if elb not in self.elbs:
                errors.append(('invalid-elb', elb))

        for appelb_target in asg.get('TargetGroupARNs', []):
            appelb_target = appelb_target.strip()
            if appelb_target not in self.appelb_target_groups:
                errors.append(('invalid-appelb-target-group', appelb_target))

        cfg_id = asg.get(
            'LaunchConfigurationName', asg['AutoScalingGroupName'])
        cfg_id = cfg_id.strip()
        cfg = self.configs.get(cfg_id)
        if cfg is None:
            errors.append(('invalid-config', cfg_id))
            self.log.debug(
                "asg:%s no launch config found" % asg['AutoScalingGroupName'])
            asg['Invalid'] = errors
            # Bug fix: previously returned the bare boolean True, which
            # InvalidConfigFilter.__call__ then stored as
            # asg['Invalid'] = True, clobbering the error annotation.
            return errors

        for sg in cfg['SecurityGroups']:
            sg = sg.strip()
            if sg not in self.security_groups:
                errors.append(('invalid-security-group', sg))

        if cfg['KeyName'] and cfg['KeyName'].strip() not in self.key_pairs:
            errors.append(('invalid-key-pair', cfg['KeyName']))

        if cfg['ImageId'].strip() not in self.images:
            errors.append(('invalid-image', cfg['ImageId']))

        for bd in cfg['BlockDeviceMappings']:
            if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
                continue
            snapshot_id = bd['Ebs']['SnapshotId'].strip()
            # Snapshots backing the image itself are always considered valid.
            if snapshot_id in self.image_snaps:
                continue
            if snapshot_id not in self.snapshots:
                errors.append(('invalid-snapshot', bd['Ebs']['SnapshotId']))
        return errors
@filters.register('valid')
class ValidConfigFilter(ConfigValidFilter):
    """Filters autoscale groups to find those that are structurally valid.

    This operates as the inverse of the invalid filter for multi-step
    workflows.

    See details on the invalid filter for a list of checks made.

    :example:

        .. code-base: yaml

            policies:
              - name: asg-valid-config
                resource: asg
                filters:
                  - valid
    """
    schema = type_schema('valid')

    def __call__(self, asg):
        # Valid means no structural reference errors were found.
        return not bool(self.get_asg_errors(asg))
@filters.register('invalid')
class InvalidConfigFilter(ConfigValidFilter):
    """Filter autoscale groups to find those that are structurally invalid.

    Structurally invalid means that the auto scale group will not be able
    to launch an instance succesfully as the configuration has

    - invalid subnets
    - invalid security groups
    - invalid key pair name
    - invalid launch config volume snapshots
    - invalid amis
    - invalid health check elb (slower)

    Internally this tries to reuse other resource managers for better
    cache utilization.

    :example:

        .. code-base: yaml

            policies:
              - name: asg-invalid-config
                resource: asg
                filters:
                  - invalid
    """
    schema = type_schema('invalid')

    def __call__(self, asg):
        found = self.get_asg_errors(asg)
        if found:
            # Annotate the resource with the problems for reporting.
            asg['Invalid'] = found
            return True
@filters.register('not-encrypted')
class NotEncryptedFilter(Filter, LaunchConfigFilterBase):
    """Check if an ASG is configured to have unencrypted volumes.

    Checks both the ami snapshots and the launch configuration.

    :example:

        .. code-block: yaml

            policies:
              - name: asg-unencrypted
                resource: asg
                filters:
                  - type: not-encrypted
                    exclude_image: true
    """
    schema = type_schema('not-encrypted', exclude_image={'type': 'boolean'})
    permissions = (
        'ec2:DescribeImages',
        'ec2:DescribeSnapshots',
        'autoscaling:DescribeLaunchConfigurations')

    # Populated by initialize() before __call__ runs.
    images = unencrypted_configs = unencrypted_images = None

    # TODO: resource-manager, notfound err mgr

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(NotEncryptedFilter, self).process(asgs, event)

    def __call__(self, asg):
        cfg = self.configs.get(asg['LaunchConfigurationName'])
        if not cfg:
            self.log.warning(
                "ASG %s instances: %d has missing config: %s",
                asg['AutoScalingGroupName'], len(asg['Instances']),
                asg['LaunchConfigurationName'])
            return False
        unencrypted = []
        if (not self.data.get('exclude_image') and
                cfg['ImageId'] in self.unencrypted_images):
            unencrypted.append('Image')
        if cfg['LaunchConfigurationName'] in self.unencrypted_configs:
            unencrypted.append('LaunchConfig')
        if unencrypted:
            # Annotate for downstream actions / reporting.
            asg['Unencrypted'] = unencrypted
        return bool(unencrypted)

    def initialize(self, asgs):
        super(NotEncryptedFilter, self).initialize(asgs)
        ec2 = local_session(self.manager.session_factory).client('ec2')
        self.unencrypted_images = self.get_unencrypted_images(ec2)
        self.unencrypted_configs = self.get_unencrypted_configs(ec2)

    def _fetch_images(self, ec2, image_ids):
        """Describe the given images, pruning ids AWS reports as missing.

        Bug fix: guard against an empty id set. Calling describe_images
        with ImageIds=[] would describe *every* image visible to the
        account instead of none.
        """
        while True:
            if not image_ids:
                return {'Images': []}
            try:
                return ec2.describe_images(ImageIds=list(image_ids))
            except ClientError as e:
                if e.response['Error']['Code'] == 'InvalidAMIID.NotFound':
                    # Remove the missing ids named in the error and retry.
                    msg = e.response['Error']['Message']
                    e_ami_ids = [
                        e_ami_id.strip() for e_ami_id
                        in msg[msg.find("'[") + 2:msg.rfind("]'")].split(',')]
                    self.log.warning(
                        "asg:not-encrypted filter image not found %s",
                        e_ami_ids)
                    for e_ami_id in e_ami_ids:
                        image_ids.remove(e_ami_id)
                    continue
                raise

    def get_unencrypted_images(self, ec2):
        """retrieve images which have unencrypted snapshots referenced."""
        image_ids = set()
        for cfg in self.configs.values():
            image_ids.add(cfg['ImageId'])

        self.log.debug("querying %d images", len(image_ids))
        results = self._fetch_images(ec2, image_ids)
        self.images = {i['ImageId']: i for i in results['Images']}

        unencrypted_images = set()
        for i in self.images.values():
            # One unencrypted ebs device is enough to flag the image.
            for bd in i['BlockDeviceMappings']:
                if 'Ebs' in bd and not bd['Ebs'].get('Encrypted'):
                    unencrypted_images.add(i['ImageId'])
                    break
        return unencrypted_images

    def get_unencrypted_configs(self, ec2):
        """retrieve configs that have unencrypted ebs volumes referenced."""
        unencrypted_configs = set()
        snaps = {}
        for cid, c in self.configs.items():
            image = self.images.get(c['ImageId'])
            # image deregistered/unavailable
            if image is not None:
                image_block_devs = {
                    bd['DeviceName']: bd['Ebs']
                    for bd in image['BlockDeviceMappings'] if 'Ebs' in bd}
            else:
                image_block_devs = {}
            for bd in c['BlockDeviceMappings']:
                if 'Ebs' not in bd:
                    continue
                # Launch configs can shadow image devices, images have
                # precedence.
                if bd['DeviceName'] in image_block_devs:
                    continue
                if 'SnapshotId' in bd['Ebs']:
                    # Defer: snapshot encryption needs a describe call.
                    snaps.setdefault(
                        bd['Ebs']['SnapshotId'].strip(), []).append(cid)
                elif not bd['Ebs'].get('Encrypted'):
                    unencrypted_configs.add(cid)
        if not snaps:
            return unencrypted_configs

        self.log.debug("querying %d snapshots", len(snaps))
        for s in self.get_snapshots(ec2, list(snaps.keys())):
            if not s.get('Encrypted'):
                unencrypted_configs.update(snaps[s['SnapshotId']])
        return unencrypted_configs

    def get_snapshots(self, ec2, snap_ids):
        """get snapshots corresponding to id, but tolerant of missing."""
        while True:
            if not snap_ids:
                # Bug fix: same empty-list guard as _fetch_images; an
                # empty SnapshotIds param describes all snapshots.
                return ()
            try:
                result = ec2.describe_snapshots(SnapshotIds=snap_ids)
            except ClientError as e:
                if e.response['Error']['Code'] == 'InvalidSnapshot.NotFound':
                    msg = e.response['Error']['Message']
                    e_snap_id = msg[msg.find("'") + 1:msg.rfind("'")]
                    self.log.warning("Snapshot not found %s" % e_snap_id)
                    snap_ids.remove(e_snap_id)
                    continue
                raise
            else:
                return result.get('Snapshots', ())
@filters.register('image-age')
class ImageAgeFilter(AgeFilter, LaunchConfigFilterBase):
    """Filter asg by image age (in days).

    :example:

        .. code-block: yaml

            policies:
              - name: asg-older-image
                resource: asg
                filters:
                  - type: image-age
                    days: 90
                    op: ge
    """
    permissions = (
        "ec2:DescribeImages",
        "autoscaling:DescribeLaunchConfigurations")

    # AMI attribute compared against the 'days' threshold.
    date_attribute = "CreationDate"
    schema = type_schema(
        'image-age',
        op={'type': 'string', 'enum': list(OPERATORS.keys())},
        days={'type': 'number'})

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(ImageAgeFilter, self).process(asgs, event)

    def initialize(self, asgs):
        """Index account images by id for get_resource_date lookups.

        Note: a dead ``image_ids`` accumulation loop was removed; the
        ami manager is queried for all images regardless.
        """
        super(ImageAgeFilter, self).initialize(asgs)
        results = self.manager.get_resource_manager('ami').resources()
        self.images = {i['ImageId']: i for i in results}

    def get_resource_date(self, i):
        cfg = self.configs[i['LaunchConfigurationName']]
        ami = self.images.get(cfg['ImageId'], {})
        # Deregistered images get an ancient sentinel date so the asg
        # still matches age thresholds.
        return parse(ami.get(
            self.date_attribute, "2000-01-01T01:01:01.000Z"))
@filters.register('image')
class ImageFilter(ValueFilter, LaunchConfigFilterBase):
    """Filter asg by image

    :example:

        .. code-block: yaml

            policies:
              - name: asg-image-tag
                resource: asg
                filters:
                  - type: image
                    value: "tag:ImageTag"
                    key: "TagValue"
                    op: eq
    """
    permissions = (
        "ec2:DescribeImages",
        "autoscaling:DescribeLaunchConfigurations")

    schema = type_schema('image', rinherit=ValueFilter.schema)

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(ImageFilter, self).process(asgs, event)

    def initialize(self, asgs):
        """Resolve each referenced ami, fetching third-party images
        individually when they are not in the account's image set."""
        super(ImageFilter, self).initialize(asgs)
        image_ids = set()
        for cfg in self.configs.values():
            image_ids.add(cfg['ImageId'])
        results = self.manager.get_resource_manager('ami').resources()
        base_image_map = {i['ImageId']: i for i in results}
        resources = {i: base_image_map[i] for i in image_ids if i in base_image_map}
        missing = list(set(image_ids) - set(resources.keys()))
        if missing:
            loaded = self.manager.get_resource_manager('ami').get_resources(missing, False)
            resources.update({image['ImageId']: image for image in loaded})
        self.images = resources

    def __call__(self, i):
        cfg = self.configs[i['LaunchConfigurationName']]
        image = self.images.get(cfg['ImageId'], {})
        # Finally, if we have no image...
        if not image:
            # Bug fix: `i` is an asg record which has no
            # InstanceId/ImageId keys; the previous message raised
            # KeyError exactly when the image was missing.
            self.log.warning(
                "Could not locate image for asg:%s ami:%s" % (
                    i['AutoScalingGroupName'], cfg["ImageId"]))
            # Match instead on empty skeleton?
            return False
        return self.match(image)
@filters.register('vpc-id')
class VpcIdFilter(ValueFilter):
    """Filters ASG based on the VpcId

    This filter is available as a ValueFilter as the vpc-id is not natively
    associated to the results from describing the autoscaling groups.

    :example:

        .. code-block: yaml

            policies:
              - name: asg-vpc-xyz
                resource: asg
                filters:
                  - type: vpc-id
                    value: vpc-12ab34cd
    """
    schema = type_schema(
        'vpc-id', rinherit=ValueFilter.schema)
    # 'key' is forced to VpcId in __init__, so remove it from the schema.
    schema['properties'].pop('key')
    permissions = ('ec2:DescribeSubnets',)

    # TODO: annotation
    def __init__(self, data, manager=None):
        super(VpcIdFilter, self).__init__(data, manager)
        self.data['key'] = 'VpcId'

    def process(self, asgs, event=None):
        # Group asgs by the first subnet of their VPCZoneIdentifier.
        # NOTE(review): only the first subnet is used to resolve the vpc;
        # presumably all of an asg's subnets share a vpc — confirm.
        subnets = {}
        for a in asgs:
            subnet_ids = a.get('VPCZoneIdentifier', '')
            if not subnet_ids:
                continue
            subnets.setdefault(subnet_ids.split(',')[0], []).append(a)
        subnet_manager = self.manager.get_resource_manager('subnet')
        # Invalid subnets on asgs happen, so query all
        all_subnets = {s['SubnetId']: s for s in subnet_manager.resources()}
        for s, s_asgs in subnets.items():
            if s not in all_subnets:
                self.log.warning(
                    "invalid subnet %s for asgs: %s",
                    s, [a['AutoScalingGroupName'] for a in s_asgs])
                continue
            for a in s_asgs:
                # Annotate each asg so the inherited value filter can
                # match on 'VpcId'.
                a['VpcId'] = all_subnets[s]['VpcId']
        return super(VpcIdFilter, self).process(asgs)
@actions.register('tag-trim')
class GroupTagTrim(TagTrim):
    """Action to trim the number of tags to avoid hitting tag limits

    :example:

        .. code-block: yaml

            policies:
              - name: asg-tag-trim
                resource: asg
                filters:
                  - type: tag-count
                    count: 10
                actions:
                  - type: tag-trim
                    space: 1
                    preserve:
                      - OwnerName
                      - OwnerContact
    """
    # ASGs are limited to 10 tags.
    max_tag_count = 10
    permissions = ('autoscaling:DeleteTags',)

    def process_tag_removal(self, resource, candidates):
        client = local_session(
            self.manager.session_factory).client('autoscaling')
        removal = [
            {'Key': tag_key,
             'ResourceType': 'auto-scaling-group',
             'ResourceId': resource['AutoScalingGroupName']}
            for tag_key in candidates]
        client.delete_tags(Tags=removal)
@filters.register('capacity-delta')
class CapacityDelta(Filter):
    """Filter returns ASG that have less instances than desired or required

    :example:

        .. code-block: yaml

            policies:
              - name: asg-capacity-delta
                resource: asg
                filters:
                  - capacity-delta
    """
    schema = type_schema('capacity-delta')

    def process(self, asgs, event=None):
        # Keep asgs running fewer instances than either floor.
        short = []
        for group in asgs:
            running = len(group['Instances'])
            if running < group['DesiredCapacity'] or running < group['MinSize']:
                short.append(group)
        return short
@actions.register('resize')
class Resize(Action):
    """Action to resize the min/max/desired instances in an ASG

    There are several ways to use this action:

    1. set min/desired to current running instances

    .. code-block: yaml

            policies:
              - name: asg-resize
                resource: asg
                filters:
                  - capacity-delta
                actions:
                  - type: resize
                    desired-size: "current"

    2. apply a fixed resize of min, max or desired, optionally saving the
       previous values to a named tag (for restoring later):

    .. code-block: yaml

            policies:
              - name: offhours-asg-off
                resource: asg
                filters:
                  - type: offhour
                    offhour: 19
                    default_tz: bst
                actions:
                  - type: resize
                    min-size: 0
                    desired-size: 0
                    save-options-tag: OffHoursPrevious

    3. restore previous values for min/max/desired from a tag:

    .. code-block: yaml

            policies:
              - name: offhours-asg-on
                resource: asg
                filters:
                  - type: onhour
                    onhour: 8
                    default_tz: bst
                actions:
                  - type: resize
                    restore-options-tag: OffHoursPrevious
    """
    schema = type_schema(
        'resize',
        **{
            'min-size': {'type': 'integer', 'minimum': 0},
            'max-size': {'type': 'integer', 'minimum': 0},
            'desired-size': {
                "anyOf": [
                    {'enum': ["current"]},
                    {'type': 'integer', 'minimum': 0}
                ]
            },
            # support previous key name with underscore
            'desired_size': {
                "anyOf": [
                    {'enum': ["current"]},
                    {'type': 'integer', 'minimum': 0}
                ]
            },
            'save-options-tag': {'type': 'string'},
            'restore-options-tag': {'type': 'string'},
        }
    )
    permissions = (
        'autoscaling:UpdateAutoScalingGroup',
        'autoscaling:CreateOrUpdateTags'
    )

    def validate(self):
        # No static validation beyond the json schema currently.
        return self

    def process(self, asgs):
        # ASG parameters to save to/restore from a tag
        asg_params = ['MinSize', 'MaxSize', 'DesiredCapacity']

        # support previous param desired_size when desired-size is not present
        # NOTE: mutates self.data so the alias only needs resolving once.
        if 'desired_size' in self.data and 'desired-size' not in self.data:
            self.data['desired-size'] = self.data['desired_size']

        client = local_session(self.manager.session_factory).client(
            'autoscaling')
        for a in asgs:
            tag_map = {t['Key']: t['Value'] for t in a.get('Tags', [])}
            update = {}
            current_size = len(a['Instances'])

            if 'restore-options-tag' in self.data:
                # we want to restore all ASG size params from saved data
                log.debug('Want to restore ASG %s size from tag %s' %
                          (a['AutoScalingGroupName'], self.data['restore-options-tag']))
                if self.data['restore-options-tag'] in tag_map:
                    # Tag value is e.g. "MinSize=1;MaxSize=4;DesiredCapacity=2"
                    for field in tag_map[self.data['restore-options-tag']].split(';'):
                        (param, value) = field.split('=')
                        if param in asg_params:
                            update[param] = int(value)
            else:
                # we want to resize, parse provided params
                if 'min-size' in self.data:
                    update['MinSize'] = self.data['min-size']
                if 'max-size' in self.data:
                    update['MaxSize'] = self.data['max-size']
                if 'desired-size' in self.data:
                    if self.data['desired-size'] == 'current':
                        # Shrink desired to the running instance count,
                        # never growing beyond the configured desired.
                        update['DesiredCapacity'] = min(current_size, a['DesiredCapacity'])
                        if 'MinSize' not in update:
                            # unless we were given a new value for min_size then
                            # ensure it is at least as low as current_size
                            update['MinSize'] = min(current_size, a['MinSize'])
                    # Idiom fix: isinstance instead of type(...) == int.
                    elif isinstance(self.data['desired-size'], int):
                        update['DesiredCapacity'] = self.data['desired-size']

            if update:
                log.debug('ASG %s size: current=%d, min=%d, max=%d, desired=%d'
                          % (a['AutoScalingGroupName'], current_size, a['MinSize'],
                             a['MaxSize'], a['DesiredCapacity']))

                if 'save-options-tag' in self.data:
                    # save existing ASG params to a tag before changing them
                    log.debug('Saving ASG %s size to tag %s' %
                              (a['AutoScalingGroupName'], self.data['save-options-tag']))
                    tags = [dict(
                        Key=self.data['save-options-tag'],
                        PropagateAtLaunch=False,
                        Value=';'.join({'%s=%d' % (param, a[param]) for param in asg_params}),
                        ResourceId=a['AutoScalingGroupName'],
                        ResourceType='auto-scaling-group',
                    )]
                    self.manager.retry(client.create_or_update_tags, Tags=tags)

                log.debug('Resizing ASG %s with %s' % (a['AutoScalingGroupName'],
                                                       str(update)))
                self.manager.retry(
                    client.update_auto_scaling_group,
                    AutoScalingGroupName=a['AutoScalingGroupName'],
                    **update)
            else:
                log.debug('nothing to resize')
@actions.register('remove-tag')
@actions.register('untag')
@actions.register('unmark')
class RemoveTag(Action):
    """Action to remove tag/tags from an ASG

    :example:

        .. code-block: yaml

            policies:
              - name: asg-remove-unnecessary-tags
                resource: asg
                filters:
                  - "tag:UnnecessaryTag": present
                actions:
                  - type: remove-tag
                    key: UnnecessaryTag
    """
    schema = type_schema(
        'remove-tag',
        aliases=('untag', 'unmark'),
        key={'type': 'string'})
    permissions = ('autoscaling:DeleteTags',)
    batch_size = 1

    def process(self, asgs):
        failure = False
        tag_key = self.data.get('key', DEFAULT_TAG)
        with self.executor_factory(max_workers=3) as w:
            pending = {}
            for batch in chunks(asgs, self.batch_size):
                pending[w.submit(self.process_asg_set, batch, tag_key)] = batch
            for fut in as_completed(pending):
                exc = fut.exception()
                if exc:
                    # Remember the last failure; re-raised once all
                    # batches have been attempted.
                    failure = exc
                    self.log.exception(
                        "Exception untagging asg:%s tag:%s error:%s" % (
                            ", ".join([a['AutoScalingGroupName']
                                       for a in pending[fut]]),
                            self.data.get('key', DEFAULT_TAG),
                            exc))
        if failure:
            raise failure

    def process_asg_set(self, asgs, key):
        session = local_session(self.manager.session_factory)
        client = session.client('autoscaling')
        tags = [
            {'Key': key,
             'ResourceType': 'auto-scaling-group',
             'ResourceId': group['AutoScalingGroupName']}
            for group in asgs]
        self.manager.retry(client.delete_tags, Tags=tags)
@actions.register('tag')
@actions.register('mark')
class Tag(Action):
    """Action to add a tag to an ASG

    The *propagate* parameter can be used to specify that the tag being added
    will need to be propagated down to each ASG instance associated or simply
    to the ASG itself.

    :example:

        .. code-block: yaml

            policies:
              - name: asg-add-owner-tag
                resource: asg
                filters:
                  - "tag:OwnerName": absent
                actions:
                  - type: tag
                    key: OwnerName
                    value: OwnerName
                    propagate: true
    """
    schema = type_schema(
        'tag',
        key={'type': 'string'},
        value={'type': 'string'},
        # Backwards compatibility
        tag={'type': 'string'},
        msg={'type': 'string'},
        propagate={'type': 'boolean'},
        aliases=('mark',)
    )
    permissions = ('autoscaling:CreateOrUpdateTags',)
    batch_size = 1

    def process(self, asgs):
        # 'tag'/'msg' are deprecated spellings of 'key'/'value'.
        key = self.data.get('key', self.data.get('tag', DEFAULT_TAG))
        value = self.data.get(
            'value', self.data.get(
                'msg', 'AutoScaleGroup does not meet policy guidelines'))
        return self.tag(asgs, key, value)

    def tag(self, asgs, key, value):
        error = None
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for asg_set in chunks(asgs, self.batch_size):
                futures[w.submit(
                    self.process_asg_set, asg_set, key, value)] = asg_set
            for f in as_completed(futures):
                asg_set = futures[f]
                if f.exception():
                    # Bug fix: previously the exception was logged but
                    # `error` was never assigned, so `raise error` below
                    # was dead code and failures were swallowed. The
                    # message also said "untagging" (copy/paste from
                    # RemoveTag).
                    error = f.exception()
                    self.log.exception(
                        "Exception tagging tag:%s error:%s asg:%s" % (
                            self.data.get('key', DEFAULT_TAG),
                            f.exception(),
                            ", ".join([a['AutoScalingGroupName']
                                       for a in asg_set])))
        if error:
            raise error

    def process_asg_set(self, asgs, key, value):
        session = local_session(self.manager.session_factory)
        client = session.client('autoscaling')
        # Bug fix: the schema documents 'propagate' but only the
        # undocumented legacy 'propagate_launch' key was read. Honor
        # 'propagate' first, falling back to the legacy spelling.
        propagate = self.data.get(
            'propagate', self.data.get('propagate_launch', True))
        tags = [
            dict(Key=key, ResourceType='auto-scaling-group', Value=value,
                 PropagateAtLaunch=propagate,
                 ResourceId=a['AutoScalingGroupName']) for a in asgs]
        self.manager.retry(client.create_or_update_tags, Tags=tags)
@actions.register('propagate-tags')
class PropagateTags(Action):
    """Propagate tags to an asg instances.

    In AWS changing an asg tag does not propagate to instances.

    This action exists to do that, and can also trim older tags
    not present on the asg anymore that are present on instances.

    :example:

        .. code-block: yaml

            policies:
              - name: asg-propagate-required
                resource: asg
                filters:
                  - "tag:OwnerName": present
                actions:
                  - type: propagate-tags
                    tags:
                      - OwnerName
    """
    schema = type_schema(
        'propagate-tags',
        tags={'type': 'array', 'items': {'type': 'string'}},
        trim={'type': 'boolean'})
    permissions = ('ec2:DeleteTags', 'ec2:CreateTags')

    def validate(self):
        # Schema already enforces the type; this guards direct construction.
        if not isinstance(self.data.get('tags', []), (list, tuple)):
            raise ValueError("No tags specified")
        return self

    def process(self, asgs):
        if not asgs:
            return
        if self.data.get('trim', False):
            # Pre-fetch instance records once; needed for tag pruning.
            self.instance_map = self.get_instance_map(asgs)
        with self.executor_factory(max_workers=10) as w:
            instance_count = sum(list(w.map(self.process_asg, asgs)))
            self.log.info("Applied tags to %d instances" % instance_count)

    def process_asg(self, asg):
        """Copy the asg's propagate-at-launch tags onto its instances.

        Returns the number of instances tagged (for reporting).
        """
        client = local_session(self.manager.session_factory).client('ec2')
        instance_ids = [i['InstanceId'] for i in asg['Instances']]
        # Only user tags marked for propagation; 'aws:' tags are reserved.
        tag_map = {t['Key']: t['Value'] for t in asg.get('Tags', [])
                   if t['PropagateAtLaunch'] and not t['Key'].startswith('aws:')}
        if self.data.get('tags'):
            # Restrict to the explicitly requested tag keys.
            tag_map = {
                k: v for k, v in tag_map.items()
                if k in self.data['tags']}
        tag_set = set(tag_map)
        if self.data.get('trim', False):
            instances = [self.instance_map[i] for i in instance_ids]
            self.prune_instance_tags(client, asg, tag_set, instances)
        if not self.manager.config.dryrun:
            client.create_tags(
                Resources=instance_ids,
                Tags=[{'Key': k, 'Value': v} for k, v in tag_map.items()])
        return len(instance_ids)

    def prune_instance_tags(self, client, asg, tag_set, instances):
        """Remove tags present on all asg instances which are not present
        on the asg.
        """
        # Count how many instances carry each (non-aws) tag key.
        instance_tags = Counter()
        instance_count = len(instances)

        remove_tags = []
        extra_tags = []

        for i in instances:
            instance_tags.update([
                t['Key'] for t in i['Tags']
                if not t['Key'].startswith('aws:')])
        for k, v in instance_tags.items():
            if not v >= instance_count:
                # Tag not on every instance -> unevenly populated.
                extra_tags.append(k)
                continue
            if k not in tag_set:
                # On every instance but no longer on the asg -> stale.
                remove_tags.append(k)

        if remove_tags:
            log.debug("Pruning asg:%s instances:%d of old tags: %s" % (
                asg['AutoScalingGroupName'], instance_count, remove_tags))
        if extra_tags:
            log.debug("Asg: %s has uneven tags population: %s" % (
                asg['AutoScalingGroupName'], instance_tags))
        # Remove orphan tags
        remove_tags.extend(extra_tags)

        if not self.manager.config.dryrun:
            client.delete_tags(
                Resources=[i['InstanceId'] for i in instances],
                Tags=[{'Key': t} for t in remove_tags])

    def get_instance_map(self, asgs):
        """Return a mapping of instance id -> full instance record."""
        instance_ids = [
            i['InstanceId'] for i in
            list(itertools.chain(*[
                g['Instances']
                for g in asgs if g['Instances']]))]
        if not instance_ids:
            return {}
        return {i['InstanceId']: i for i in
                self.manager.get_resource_manager(
                    'ec2').get_resources(instance_ids)}
@actions.register('rename-tag')
class RenameTag(Action):
    """Rename a tag on an AutoScaleGroup.

    :example:

        .. code-block: yaml

            policies:
              - name: asg-rename-owner-tag
                resource: asg
                filters:
                  - "tag:OwnerNames": present
                actions:
                  - type: rename-tag
                    propagate: true
                    source: OwnerNames
                    dest: OwnerName
    """
    schema = type_schema(
        'rename-tag', required=['source', 'dest'],
        propagate={'type': 'boolean'},
        source={'type': 'string'},
        dest={'type': 'string'})

    def get_permissions(self):
        # ec2 tag permissions only needed when propagating to instances.
        permissions = (
            'autoscaling:CreateOrUpdateTags',
            'autoscaling:DeleteTags')
        if self.data.get('propagate', True):
            permissions += ('ec2:CreateTags', 'ec2:DeleteTags')
        return permissions

    def process(self, asgs):
        source = self.data.get('source')
        dest = self.data.get('dest')
        count = len(asgs)

        # Narrow to asgs that actually carry the source tag.
        filtered = []
        for a in asgs:
            for t in a.get('Tags'):
                if t['Key'] == source:
                    filtered.append(a)
                    break
        asgs = filtered
        self.log.info("Filtered from %d asgs to %d", count, len(asgs))
        self.log.info(
            "Renaming %s to %s on %d asgs", source, dest, len(filtered))
        with self.executor_factory(max_workers=3) as w:
            list(w.map(self.process_asg, asgs))

    def process_asg(self, asg):
        """Move source tag to destination tag.

        Check tag count on asg
        Create new tag tag
        Delete old tag
        Check tag count on instance
        Create new tag
        Delete old tag
        """
        source_tag = self.data.get('source')
        tag_map = {t['Key']: t for t in asg.get('Tags', [])}
        source = tag_map[source_tag]
        destination_tag = self.data.get('dest')
        propagate = self.data.get('propagate', True)
        client = local_session(
            self.manager.session_factory).client('autoscaling')
        # technically safer to create first, but running into
        # max tags constraints, otherwise.
        #
        # delete_first = len([t for t in tag_map if not t.startswith('aws:')])
        client.delete_tags(Tags=[
            {'ResourceId': asg['AutoScalingGroupName'],
             'ResourceType': 'auto-scaling-group',
             'Key': source_tag,
             'Value': source['Value']}])
        client.create_or_update_tags(Tags=[
            {'ResourceId': asg['AutoScalingGroupName'],
             'ResourceType': 'auto-scaling-group',
             'PropagateAtLaunch': propagate,
             'Key': destination_tag,
             'Value': source['Value']}])
        if propagate:
            self.propagate_instance_tag(source, destination_tag, asg)

    def propagate_instance_tag(self, source, destination_tag, asg):
        # Mirror the rename onto the asg's current instances.
        client = local_session(self.manager.session_factory).client('ec2')
        client.delete_tags(
            Resources=[i['InstanceId'] for i in asg['Instances']],
            Tags=[{"Key": source['Key']}])
        client.create_tags(
            Resources=[i['InstanceId'] for i in asg['Instances']],
            Tags=[{'Key': destination_tag, 'Value': source['Value']}])
@actions.register('mark-for-op')
class MarkForOp(Tag):
    """Action to create a delayed action for a later date

    :example:

        .. code-block: yaml

            policies:
              - name: asg-suspend-schedule
                resource: asg
                filters:
                  - type: value
                    key: MinSize
                    value: 2
                actions:
                  - type: mark-for-op
                    tag: custodian_suspend
                    message: "Suspending: {op}@{action_date}"
                    op: suspend
                    days: 7
    """
    schema = type_schema(
        'mark-for-op',
        op={'enum': ['suspend', 'resume', 'delete']},
        key={'type': 'string'},
        tag={'type': 'string'},
        message={'type': 'string'},
        days={'type': 'number', 'minimum': 0})

    default_template = (
        'AutoScaleGroup does not meet org policy: {op}@{action_date}')

    def process(self, asgs):
        template = self.data.get('message', self.default_template)
        tag_key = self.data.get('key', self.data.get('tag', DEFAULT_TAG))
        operation = self.data.get('op', 'suspend')
        delay_days = self.data.get('days', 4)
        action_time = datetime.now(tz=tzutc()) + timedelta(days=delay_days)
        action_date = action_time.strftime('%Y/%m/%d')
        try:
            msg = template.format(op=operation, action_date=action_date)
        except Exception:
            # Malformed user template; fall back to the default one.
            self.log.warning("invalid template %s" % template)
            msg = self.default_template.format(
                op=operation, action_date=action_date)
        self.log.info("Tagging %d asgs for %s on %s" % (
            len(asgs), operation, action_date))
        self.tag(asgs, tag_key, msg)
@actions.register('suspend')
class Suspend(Action):
    """Action to suspend ASG processes and instances

    AWS ASG suspend/resume and process docs https://goo.gl/XYtKQ8

    :example:

        .. code-block: yaml

            policies:
              - name: asg-suspend-processes
                resource: asg
                filters:
                  - "tag:SuspendTag": present
                actions:
                  - type: suspend
    """
    permissions = ("autoscaling:SuspendProcesses", "ec2:StopInstances")

    # The full set of suspendable ASG scaling processes.
    ASG_PROCESSES = [
        "Launch",
        "Terminate",
        "HealthCheck",
        "ReplaceUnhealthy",
        "AZRebalance",
        "AlarmNotification",
        "ScheduledActions",
        "AddToLoadBalancer"]

    schema = type_schema(
        'suspend',
        exclude={
            'type': 'array',
            'title': 'ASG Processes to not suspend',
            'items': {'enum': ASG_PROCESSES}})

    # Convert to a set after the schema captured the ordered list.
    ASG_PROCESSES = set(ASG_PROCESSES)

    def process(self, asgs):
        with self.executor_factory(max_workers=3) as w:
            list(w.map(self.process_asg, asgs))

    def process_asg(self, asg):
        """Multistep process to stop an asg aprori of setup

         - suspend processes
         - stop instances
        """
        session = local_session(self.manager.session_factory)
        asg_client = session.client('autoscaling')
        processes = list(self.ASG_PROCESSES.difference(
            self.data.get('exclude', ())))
        try:
            self.manager.retry(
                asg_client.suspend_processes,
                ScalingProcesses=processes,
                AutoScalingGroupName=asg['AutoScalingGroupName'])
        except ClientError as e:
            # asg deleted out from under us; nothing to do.
            if e.response['Error']['Code'] == 'ValidationError':
                return
            raise
        ec2_client = session.client('ec2')
        try:
            instance_ids = [i['InstanceId'] for i in asg['Instances']]
            if not instance_ids:
                return
            # Use a dedicated retry for request-rate throttling on stop.
            retry = get_retry((
                'RequestLimitExceeded', 'Client.RequestLimitExceeded'))
            retry(ec2_client.stop_instances, InstanceIds=instance_ids)
        except ClientError as e:
            # Instances already terminated or in flight; best effort.
            if e.response['Error']['Code'] in (
                    'InvalidInstanceID.NotFound',
                    'IncorrectInstanceState'):
                log.warning("Erroring stopping asg instances %s %s" % (
                    asg['AutoScalingGroupName'], e))
                return
            raise
@actions.register('resume')
class Resume(Action):
    """Resume a suspended autoscale group and its instances

    Parameter 'delay' is the amount of time (in seconds) to wait between
    starting the groups' instances and resuming their scaling processes
    (default value: 30).

    :example:

        .. code-block: yaml

            policies:
              - name: asg-resume-processes
                resource: asg
                filters:
                  - "tag:Resume": present
                actions:
                  - type: resume
                    delay: 300
    """

    schema = type_schema('resume', delay={'type': 'number'})
    permissions = ("autoscaling:ResumeProcesses", "ec2:StartInstances")

    def process(self, asgs):
        original_count = len(asgs)
        # Only groups that actually have suspended processes need work.
        asgs = [a for a in asgs if a['SuspendedProcesses']]
        self.delay = self.data.get('delay', 30)
        self.log.debug("Filtered from %d to %d suspended asgs",
                       original_count, len(asgs))

        # Phase 1: start the instances of every group concurrently.
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for a in asgs:
                futures[w.submit(self.resume_asg_instances, a)] = a
            for f in as_completed(futures):
                if f.exception():
                    log.error("Traceback resume asg:%s instances error:%s" % (
                        futures[f]['AutoScalingGroupName'],
                        f.exception()))
                    continue

        # Grace period so instances can pass health checks before the
        # scaling processes (e.g. HealthCheck) are resumed.
        log.debug("Sleeping for asg health check grace")
        time.sleep(self.delay)

        # Phase 2: resume the suspended scaling processes themselves.
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for a in asgs:
                futures[w.submit(self.resume_asg, a)] = a
            for f in as_completed(futures):
                if f.exception():
                    log.error("Traceback resume asg:%s error:%s" % (
                        futures[f]['AutoScalingGroupName'],
                        f.exception()))

    def resume_asg_instances(self, asg):
        """Resume asg instances.
        """
        session = local_session(self.manager.session_factory)
        ec2_client = session.client('ec2')
        instance_ids = [i['InstanceId'] for i in asg['Instances']]
        if not instance_ids:
            return
        # Starting many instances is throttle-prone; retry on rate limits.
        retry = get_retry((
            'RequestLimitExceeded', 'Client.RequestLimitExceeded'))
        retry(ec2_client.start_instances, InstanceIds=instance_ids)

    def resume_asg(self, asg):
        """Resume asg processes.
        """
        session = local_session(self.manager.session_factory)
        asg_client = session.client('autoscaling')
        self.manager.retry(
            asg_client.resume_processes,
            AutoScalingGroupName=asg['AutoScalingGroupName'])
@actions.register('delete')
class Delete(Action):
    """Action to delete an ASG

    The 'force' parameter is needed when deleting an ASG that has instances
    attached to it.

    :example:

        .. code-block: yaml

            policies:
              - name: asg-unencrypted
                resource: asg
                filters:
                  - type: not-encrypted
                    exclude_image: true
                actions:
                  - type: delete
                    force: true
    """

    schema = type_schema('delete', force={'type': 'boolean'})
    permissions = ("autoscaling:DeleteAutoScalingGroup",)

    def process(self, asgs):
        # Fan out deletions across a small worker pool; draining the map
        # iterator forces every deletion to run.
        with self.executor_factory(max_workers=3) as pool:
            list(pool.map(self.process_asg, asgs))

    @worker
    def process_asg(self, asg):
        """Delete a single auto scaling group, optionally forcing it."""
        force = self.data.get('force', False)
        group_name = asg['AutoScalingGroupName']
        if force:
            log.info('Forcing deletion of Auto Scaling group %s',
                     group_name)
        client = local_session(
            self.manager.session_factory).client('autoscaling')
        try:
            self.manager.retry(
                client.delete_auto_scaling_group,
                AutoScalingGroupName=group_name,
                ForceDelete=force)
        except ClientError as e:
            if e.response['Error']['Code'] == 'ValidationError':
                log.warning("Erroring deleting asg %s %s",
                            group_name, e)
                return
            raise
@resources.register('launch-config')
class LaunchConfig(query.QueryResourceManager):
    """Resource manager for autoscaling launch configurations."""

    class resource_type(object):
        # Boto service / resource descriptors consumed by the query
        # machinery of QueryResourceManager.
        service = 'autoscaling'
        type = 'launchConfiguration'
        id = name = 'LaunchConfigurationName'
        date = 'CreatedTime'
        dimension = None
        enum_spec = (
            'describe_launch_configurations', 'LaunchConfigurations', None)
        filter_name = 'LaunchConfigurationNames'
        filter_type = 'list'
        config_type = 'AWS::AutoScaling::LaunchConfiguration'

    def get_source(self, source_type):
        # 'describe' uses the UserData-stripping source below; 'config'
        # presumably sources records from AWS Config (see config_type).
        if source_type == 'describe':
            return DescribeLaunchConfig(self)
        elif source_type == 'config':
            return query.ConfigSource(self)
        raise ValueError('invalid source %s' % source_type)
class DescribeLaunchConfig(query.DescribeSource):
    """Describe source for launch configs that drops the UserData blob."""

    def augment(self, resources):
        # UserData is a potentially large opaque payload; remove it from
        # every record before returning the collection unchanged otherwise.
        for record in resources:
            record.pop('UserData', None)
        return resources
@LaunchConfig.filter_registry.register('age')
class LaunchConfigAge(AgeFilter):
    """Filter ASG launch configuration by age (in days)

    :example:

        .. code-block: yaml

            policies:
              - name: asg-launch-config-old
                resource: launch-config
                filters:
                  - type: age
                    days: 90
                    op: ge
    """

    # Resource attribute AgeFilter compares against the 'days' threshold.
    date_attribute = "CreatedTime"
    schema = type_schema(
        'age',
        op={'type': 'string', 'enum': list(OPERATORS.keys())},
        days={'type': 'number'})
@LaunchConfig.filter_registry.register('unused')
class UnusedLaunchConfig(Filter):
    """Filters all launch configurations that are not in use but exist

    :example:

        .. code-block: yaml

            policies:
              - name: asg-unused-launch-config
                resource: launch-config
                filters:
                  - unused
    """

    schema = type_schema('unused')

    def get_permissions(self):
        # Needs asg describe permissions to compute the in-use set.
        return self.manager.get_resource_manager('asg').get_permissions()

    def process(self, configs, event=None):
        # Collect the launch config name referenced by every ASG; groups
        # without one contribute their own name (never matches a config).
        groups = self.manager.get_resource_manager('asg').resources()
        self.used = {
            g.get('LaunchConfigurationName', g['AutoScalingGroupName'])
            for g in groups}
        return super(UnusedLaunchConfig, self).process(configs)

    def __call__(self, config):
        return config['LaunchConfigurationName'] not in self.used
@LaunchConfig.action_registry.register('delete')
class LaunchConfigDelete(Action):
    """Action to delete launch configurations (typically unused ones).

    Note: the docstring previously said "Filters all unused launch
    configurations" -- this is an *action* that deletes the configs the
    policy's filters matched, not a filter.

    :example:

        .. code-block: yaml

            policies:
              - name: asg-unused-launch-config-delete
                resource: launch-config
                filters:
                  - unused
                actions:
                  - delete
    """

    schema = type_schema('delete')
    permissions = ("autoscaling:DeleteLaunchConfiguration",)

    def process(self, configs):
        with self.executor_factory(max_workers=2) as w:
            list(w.map(self.process_config, configs))

    @worker
    def process_config(self, config):
        """Delete one launch configuration, ignoring already-deleted ones."""
        session = local_session(self.manager.session_factory)
        client = session.client('autoscaling')
        try:
            client.delete_launch_configuration(
                LaunchConfigurationName=config[
                    'LaunchConfigurationName'])
        except ClientError as e:
            # Catch already deleted
            if e.response['Error']['Code'] == 'ValidationError':
                return
            raise
|
{
"content_hash": "a1b0be1d74ca63ad7e2fa7d8e4985918",
"timestamp": "",
"source": "github",
"line_count": 1623,
"max_line_length": 94,
"avg_line_length": 33.8909426987061,
"alnum_prop": 0.5485137714753204,
"repo_name": "jimmyraywv/cloud-custodian",
"id": "3b0ed2495afe1910d1bf5398a13c33d9482f4f2c",
"size": "55595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c7n/resources/asg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1364"
},
{
"name": "Python",
"bytes": "1760566"
}
],
"symlink_target": ""
}
|
import gevent
import os
import sys
import socket
import errno
import uuid
import logging
import coverage
import cgitb
cgitb.enable(format='text')
import fixtures
import testtools
from testtools.matchers import Equals, MismatchError, Not, Contains
from testtools import content, content_type, ExpectedException
import unittest
import re
import json
import copy
from lxml import etree
import inspect
import requests
import stevedore
from vnc_api.vnc_api import *
import keystoneclient.apiclient.exceptions as kc_exceptions
import keystoneclient.v2_0.client as keystone
from keystoneclient.middleware import auth_token
from cfgm_common import rest, utils
import cfgm_common
sys.path.append('../common/tests')
import test_utils
import test_common
import test_case
# Module-level logger for the test suite.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Permission bit masks used in perms2 owner/share/global access fields:
# read=4, write=2, execute=1, combined by summation.
PERMS_NONE = 0
PERMS_X = 1
PERMS_W = 2
PERMS_R = 4
PERMS_WX = 3
PERMS_RX = 5
PERMS_RW = 6
PERMS_RWX = 7
# create users specified as array of tuples (name, password, role)
# assumes admin user and tenant exists
class User(object):
    """Test user bound to a keystone identity plus its own VncApi client.

    Creates the role/tenant/user in (fake) keystone when missing, assigns
    the role on the tenant, and constructs a MyVncApi authenticated as
    that user.
    """

    def __init__(self, apis_ip, apis_port, kc, name, password, role, project):
        self.name = name
        self.password = password
        self.role = role
        self.project = project
        self.project_uuid = None
        self.project_obj = None

        # create user/role/tenant in keystone as needed
        kc_users = set([user.name for user in kc.users.list()])
        kc_roles = set([user.name for user in kc.roles.list()])
        kc_tenants = set([tenant.name for tenant in kc.tenants.list()])

        if self.role not in kc_roles:
            logger.info('role %s missing from keystone ... creating' % self.role)
            kc.roles.create(self.role)

        if self.project not in kc_tenants:
            logger.info( 'tenant %s missing from keystone ... creating' % self.project)
            kc.tenants.create(self.project)

        # Look the tenant up again to capture its keystone-assigned id.
        for tenant in kc.tenants.list():
            if tenant.name == self.project:
                break
        self.project_uuid = tenant.id

        if self.name not in kc_users:
            logger.info( 'user %s missing from keystone ... creating' % self.name)
            user = kc.users.create(self.name, self.password, '', tenant_id=tenant.id)

        role_dict = {role.name:role for role in kc.roles.list()}
        user_dict = {user.name:user for user in kc.users.list()}

        logger.info( 'Adding user %s with role %s to tenant %s' \
            % (name, role, project))
        try:
            kc.roles.add_user_role(user_dict[self.name], role_dict[self.role], tenant)
        except kc_exceptions.Conflict:
            # Role already assigned on a previous run -- fine.
            pass

        self.vnc_lib = MyVncApi(username = self.name, password = self.password,
            tenant_name = self.project,
            api_server_host = apis_ip, api_server_port = apis_port)
    # end __init__

    def api_acl_name(self):
        # FQ name of this project's default api-access-list object.
        rg_name = list(self.project_obj.get_fq_name())
        rg_name.append('default-api-access-list')
        return rg_name

    def check_perms(self, obj_uuid):
        # Ask the API server which permissions this user's token grants
        # on the given object uuid.
        query = 'token=%s&uuid=%s' % (self.vnc_lib.get_token(), obj_uuid)
        rv = self.vnc_lib._request_server(rest.OP_GET, "/obj-perms", data=query)
        rv = json.loads(rv)
        return rv['permissions']
# display resource id-perms
# display resource id-perms
def print_perms(obj_perms):
    """Render a perms2 object as 'owner/owner_access global_access [shares]'."""
    shares = []
    for entry in obj_perms.share:
        shares.append('%s:%d' % (entry.tenant, entry.tenant_access))
    return '%s/%d %d %s' % (
        obj_perms.owner, obj_perms.owner_access,
        obj_perms.global_access, shares)
# end print_perms
# set id perms for object
# set id perms for object
def set_perms(obj, owner=None, owner_access=None, share=None, global_access=None):
    """Selectively update owner/share/global fields of obj's perms2.

    Only the fields passed in are modified; logs the perms before and
    after the update.
    """
    try:
        perms = obj.get_perms2()
    except AttributeError:
        # Object type does not carry perms2 -- nothing sane to do.
        logger.info('*** Unable to set perms2 in object %s' % obj.get_fq_name())
        sys.exit()

    logger.info('Current perms %s = %s' % (obj.get_fq_name(), print_perms(perms)))

    if owner:
        perms.owner = owner
    if owner_access:
        perms.owner_access = owner_access
    if share is not None:
        perms.share = [ShareType(tenant_uuid, tenant_crud)
                       for (tenant_uuid, tenant_crud) in share]
    if global_access is not None:
        perms.global_access = global_access

    obj.set_perms2(perms)
    logger.info('New perms %s = %s' % (obj.get_fq_name(), print_perms(perms)))
# end set_perms
# Read VNC object. Return None if object doesn't exists
# Read VNC object. Return None if object doesn't exists
def vnc_read_obj(vnc, obj_type, name = None, obj_uuid = None):
    """Read a VNC object by FQ name or UUID; return None if absent."""
    if name is None and obj_uuid is None:
        logger.info( 'Need FQN or UUID to read object')
        return None
    # Resolve the per-type read method, e.g. 'virtual-network' ->
    # vnc.virtual_network_read.
    reader = getattr(vnc, "%s_read" % obj_type.replace('-', '_'))
    try:
        if obj_uuid:
            # Canonicalize bare 32-hex-digit uuids before use.
            if '-' not in obj_uuid:
                obj_uuid = str(uuid.UUID(obj_uuid))
            return reader(id=obj_uuid)
        return reader(fq_name=name)
    except NoIdError:
        logger.info( '%s %s not found!' % (obj_type, name if name else obj_uuid))
        return None
    except PermissionDenied:
        logger.info( 'Permission denied reading %s %s' % (obj_type, name))
        raise
# end
def show_rbac_rules(api_access_list_entries):
    """Log the rules of an api-access-list in a numbered tabular form."""
    if api_access_list_entries is None:
        logger.info( 'Empty RBAC group!')
        return

    # {u'rbac_rule': [{u'rule_object': u'*', u'rule_perms': [{u'role_crud': u'CRUD', u'role_name': u'admin'}], u'rule_field': None}]}
    rule_list = api_access_list_entries.get_rbac_rule()
    logger.info('Rules (%d):' % len(rule_list))
    logger.info('----------')
    for idx, rule in enumerate(rule_list, start=1):
        perms_str = ''
        for perm in rule.rule_perms:
            perms_str += perm.role_name + ':' + perm.role_crud + ','
        obj_field = ("%s.%s" % (rule.rule_object, rule.rule_field)
                     if rule.rule_field else rule.rule_object)
        logger.info('%2d %-32s %s' % (idx, obj_field, perms_str))
    logger.info('')
def build_rule(rule_str):
    """Parse 'object[.field] role1:CRUD,role2:CR' into an RbacRuleType.

    Returns None when rule_str is falsy or has fewer than two
    space-separated tokens. Fix: removed the dead local 'o_f' which was
    computed but never used.
    """
    tokens = rule_str.split(" ") if rule_str else []
    if len(tokens) < 2:
        return None

    # tokens[0] is object.field, tokens[1] is the comma-separated perms
    obj_field = tokens[0].split(".")
    obj_name = obj_field[0]
    field_name = obj_field[1] if len(obj_field) > 1 else None

    # perms eg ['foo:CRU', 'bar:CR']
    rule_perms = []
    for perm in tokens[1].split(","):
        parts = perm.split(":")
        rule_perms.append(RbacPermType(role_name = parts[0], role_crud = parts[1]))

    # build rule
    return RbacRuleType(
        rule_object = obj_name,
        rule_field = field_name,
        rule_perms = rule_perms)
#end
def match_rule(rule_list, rule_str):
    """Merge the rule described by rule_str into rule_list, in place.

    If a rule with the same object/field already exists, its per-role
    crud strings are unioned with the incoming ones; otherwise the new
    rule is appended.
    """
    incoming = build_rule(rule_str)
    needs_append = True
    for existing in rule_list:
        if (existing.rule_object != incoming.rule_object
                or existing.rule_field != incoming.rule_field):
            continue

        # object and field match - fix rule in place
        needs_append = False
        for new_perm in incoming.rule_perms:
            merged = False
            for old_perm in existing.rule_perms:
                if old_perm.role_name == new_perm.role_name:
                    # role found - merge incoming and existing crud in place
                    combined = set(list(old_perm.role_crud)) | set(list(new_perm.role_crud))
                    old_perm.role_crud = ''.join(combined)
                    merged = True
            if not merged:
                existing.rule_perms.append(RbacPermType(
                    role_name = new_perm.role_name, role_crud = new_perm.role_crud))

    if needs_append:
        rule_list.append(incoming)
# end match_rule
def vnc_fix_api_access_list(vnc_lib, pobj, rule_str = None):
    """Create or update a project's default api-access-list.

    When the list doesn't exist it is created; when rule_str is given the
    rule is merged into the existing entries; when rule_str is None on an
    existing list, its rules are wiped (used by tests to revoke access).

    Fixes: compare to None with 'is' (was 'rg == None'); dropped the
    unused binding of match_rule's return value (it mutates rule_list
    in place and returns None).
    """
    rg_name = list(pobj.get_fq_name())
    rg_name.append('default-api-access-list')
    rg = vnc_read_obj(vnc_lib, 'api-access-list', name = rg_name)

    create = False
    rule_list = []
    if rg is None:
        rg = ApiAccessList(
            name = 'default-api-access-list',
            parent_obj = pobj,
            api_access_list_entries = None)
        create = True
    elif rule_str:
        api_access_list_entries = rg.get_api_access_list_entries()
        rule_list = api_access_list_entries.get_rbac_rule()

    if rule_str:
        match_rule(rule_list, rule_str)

    rentry = RbacRuleEntriesType(rule_list)
    rg.set_api_access_list_entries(rentry)
    if create:
        logger.info( 'API access list empty. Creating with default rule')
        vnc_lib.api_access_list_create(rg)
    else:
        vnc_lib.api_access_list_update(rg)
    show_rbac_rules(rg.get_api_access_list_entries())
def token_from_user_info(user_name, tenant_name, domain_name, role_name,
                         tenant_id = None):
    """Build a fake keystone-style auth token payload as a JSON string."""
    headers = {
        'X-User': user_name,
        'X-User-Name': user_name,
        'X-Project-Name': tenant_name,
        'X-Project-Id': tenant_id or '',
        'X-Domain-Name': domain_name,
        'X-Role': role_name,
    }
    # logger.info( '**** Generate token %s ****' % rval)
    return json.dumps(headers)
class MyVncApi(VncApi):
    """VncApi subclass that fabricates keystone-style tokens locally.

    _authenticate builds a JSON token from user/tenant/role information
    (looked up via an admin keystone client) instead of performing a real
    keystone auth round trip.
    """

    def __init__(self, username = None, password = None,
            tenant_name = None, api_server_host = None, api_server_port = None):
        self._username = username
        self._tenant_name = tenant_name
        self.auth_token = None
        # Admin keystone client used only to look up role / tenant ids.
        self._kc = keystone.Client(username='admin', password='contrail123',
                       tenant_name='admin',
                       auth_url='http://127.0.0.1:5000/v2.0')
        VncApi.__init__(self, username = username, password = password,
            tenant_name = tenant_name, api_server_host = api_server_host,
            api_server_port = api_server_port)

    def _authenticate(self, response=None, headers=None):
        # Build a fake token for this user and attach it to the request
        # headers; also remember it so get_token() can report it.
        role_name = self._kc.user_role(self._username, self._tenant_name)
        uobj = self._kc.users.get(self._username)
        rval = token_from_user_info(self._username, self._tenant_name,
            'default-domain', role_name, uobj.tenant_id)
        new_headers = headers or {}
        new_headers['X-AUTH-TOKEN'] = rval
        self.auth_token = rval
        return new_headers

    def get_token(self):
        # Last token produced by _authenticate (None before first auth).
        return self.auth_token

    # This is needed for VncApi._authenticate invocation from within Api server.
    # We don't have access to user information so we hard code admin credentials.
    def ks_admin_authenticate(self, response=None, headers=None):
        rval = token_from_user_info('admin', 'admin', 'default-domain', 'admin')
        new_headers = {}
        new_headers['X-AUTH-TOKEN'] = rval
        return new_headers
class TestPermissions(test_case.ApiServerTestCase):
domain_name = 'default-domain'
fqdn = [domain_name]
vn_name='alice-vn'
def setUp(self):
extra_mocks = [(keystone.Client,
'__new__', test_utils.FakeKeystoneClient),
(vnc_api.vnc_api.VncApi,
'_authenticate', ks_admin_authenticate),
(auth_token, 'AuthProtocol',
test_utils.FakeAuthProtocol)]
extra_config_knobs = [
('DEFAULTS', 'multi_tenancy_with_rbac', 'True'),
('DEFAULTS', 'auth', 'keystone'),
]
super(TestPermissions, self).setUp(extra_mocks=extra_mocks,
extra_config_knobs=extra_config_knobs)
ip = self._api_server_ip
port = self._api_server_port
# kc = test_utils.get_keystone_client()
kc = keystone.Client(username='admin', password='contrail123',
tenant_name='admin',
auth_url='http://127.0.0.1:5000/v2.0')
# prepare token before vnc api invokes keystone
alice = User(ip, port, kc, 'alice', 'alice123', 'alice-role', 'alice-proj')
bob = User(ip, port, kc, 'bob', 'bob123', 'bob-role', 'bob-proj')
admin = User(ip, port, kc, 'admin', 'contrail123', 'admin', 'admin')
self.alice = alice
self.bob = bob
self.admin = admin
self.users = [self.alice, self.bob]
"""
1. create project in API server
2. read objects back and pupolate locally
3. reassign ownership of projects to user from admin
"""
for user in [admin, alice, bob]:
project_obj = Project(user.project)
project_obj.uuid = user.project_uuid
logger.info( 'Creating Project object for %s, uuid %s' \
% (user.project, user.project_uuid))
admin.vnc_lib.project_create(project_obj)
# read projects back
user.project_obj = vnc_read_obj(admin.vnc_lib,
'project', obj_uuid = user.project_uuid)
logger.info( 'Change owner of project %s to %s' % (user.project, user.project_uuid))
set_perms(user.project_obj, owner=user.project_uuid, share = [])
admin.vnc_lib.project_update(user.project_obj)
# delete test VN if it exists
vn_fq_name = [self.domain_name, alice.project, self.vn_name]
vn = vnc_read_obj(admin.vnc_lib, 'virtual-network', name = vn_fq_name)
if vn:
logger.info( '%s exists ... deleting to start fresh' % vn_fq_name)
admin.vnc_lib.virtual_network_delete(fq_name = vn_fq_name)
# allow permission to create objects
for user in self.users:
logger.info( "%s: project %s to allow full access to role %s" % \
(user.name, user.project, user.role))
vnc_fix_api_access_list(self.admin.vnc_lib, user.project_obj,
rule_str = '* %s:CRUD' % user.role)
# delete api-access-list for alice and bob and disallow api access to their projects
# then try to create VN in the project. This should fail
def test_api_access(self):
logger.info('')
logger.info( '########### API ACCESS (CREATE) ##################')
alice = self.alice
bob = self.bob
admin = self.admin
rv_json = admin.vnc_lib._request(rest.OP_GET, '/multi-tenancy-with-rbac')
rv = json.loads(rv_json)
self.assertEquals(rv["enabled"], True)
# disable rbac
# delete api-access-list for alice and bob and disallow api access to their projects
for user in self.users:
logger.info( "Delete api-acl for project %s to disallow api access" % user.project)
vnc_fix_api_access_list(self.admin.vnc_lib, user.project_obj, rule_str = None)
logger.info( 'alice: trying to create VN in her project')
vn = VirtualNetwork(self.vn_name, self.alice.project_obj)
try:
self.alice.vnc_lib.virtual_network_create(vn)
self.assertTrue(False, '*** Created virtual network ... test failed!')
except PermissionDenied as e:
self.assertTrue(True, 'Failed to create VN ... Test passes!')
# allow permission to create virtual-network
for user in self.users:
logger.info( "%s: project %s to allow full access to role %s" % \
(user.name, user.project, user.role))
# note that collection API is set for create operation
vnc_fix_api_access_list(self.admin.vnc_lib, user.project_obj,
rule_str = 'virtual-networks %s:C' % user.role)
logger.info( '')
logger.info( 'alice: trying to create VN in her project')
try:
self.alice.vnc_lib.virtual_network_create(vn)
logger.info( 'Created virtual network %s ... test passed!' % vn.get_fq_name())
testfail = False
except PermissionDenied as e:
logger.info( 'Failed to create VN ... Test failed!')
testfail = True
self.assertThat(testfail, Equals(False))
logger.info('')
logger.info( '########### API ACCESS (READ) ##################')
logger.info( 'alice: trying to read VN in her project (should fail)')
try:
vn_fq_name = [self.domain_name, self.alice.project, self.vn_name]
vn = vnc_read_obj(self.alice.vnc_lib, 'virtual-network', name = vn_fq_name)
self.assertTrue(False, '*** Read VN without read permission')
except PermissionDenied as e:
self.assertTrue(True, 'Unable to read VN ... test passed')
# allow read access
vnc_fix_api_access_list(self.admin.vnc_lib, self.alice.project_obj,
rule_str = 'virtual-network %s:R' % self.alice.role)
logger.info( 'alice: added permission to read virtual-network')
logger.info( 'alice: trying to read VN in her project (should succeed)')
try:
vn = vnc_read_obj(self.alice.vnc_lib, 'virtual-network', name = vn_fq_name)
self.assertTrue(True, 'Read VN successfully ... test passed')
except PermissionDenied as e:
self.assertTrue(False, '*** Read VN failed ... test failed!!!')
logger.info('')
logger.info( '########### API ACCESS (UPDATE) ##################')
logger.info( 'alice: trying to update VN in her project (should fail)')
try:
vn.display_name = "foobar"
alice.vnc_lib.virtual_network_update(vn)
self.assertTrue(False, '*** Set field in VN ... test failed!')
testfail += 1
except PermissionDenied as e:
self.assertTrue(True, 'Unable to update field in VN ... Test succeeded!')
# give update API access to alice
vnc_fix_api_access_list(admin.vnc_lib, alice.project_obj,
rule_str = 'virtual-network %s:U' % alice.role)
logger.info( '')
logger.info( 'alice: added permission to update virtual-network')
logger.info( 'alice: trying to set field in her VN ')
try:
vn.display_name = "foobar"
alice.vnc_lib.virtual_network_update(vn)
self.assertTrue(True, 'Set field in VN ... test passed!')
except PermissionDenied as e:
self.assertTrue(False, '*** Failed to update field in VN ... Test failed!')
testfail += 1
if testfail > 0:
sys.exit()
vn2 = vnc_read_obj(alice.vnc_lib, 'virtual-network', name = vn.get_fq_name())
logger.info( 'alice: display_name %s' % vn2.display_name)
self.assertEquals(vn2.display_name, "foobar")
logger.info('')
logger.info( '####### API ACCESS (update field restricted to admin) ##############')
logger.info( 'Restricting update of field to admin only ')
vnc_fix_api_access_list(admin.vnc_lib, alice.project_obj,
rule_str = 'virtual-network.display_name admin:U')
try:
vn.display_name = "alice"
alice.vnc_lib.virtual_network_update(vn)
self.assertTrue(False, '*** Set field in VN ... test failed!')
except PermissionDenied as e:
self.assertTrue(True, 'Failed to update field in VN ... Test passed!')
logger.info('')
logger.info( '########### API ACCESS (DELETE) ##################')
logger.info( 'alice: try deleting VN .. should fail')
# delete test VN ... should fail
vn_fq_name = [self.domain_name, alice.project, self.vn_name]
try:
alice.vnc_lib.virtual_network_delete(fq_name = vn_fq_name)
self.assertTrue(False, '*** Deleted VN ... test failed!')
except PermissionDenied as e:
self.assertTrue(True, 'Error deleting VN ... test passed!')
logger.info('')
logger.info( '############### PERMS2 ##########################')
logger.info( 'Giving bob API level access to perform all ops on virtual-network')
logger.info( "Bob should'nt be able to create VN in alice project because only\n ")
logger.info( 'owner (alice) has write permission in her project')
vnc_fix_api_access_list(admin.vnc_lib, bob.project_obj,
rule_str = 'virtual-network %s:CRUD' % bob.role)
logger.info( '')
logger.info( 'bob: trying to create VN in alice project ... should fail')
try:
vn2 = VirtualNetwork('bob-vn-in-alice-project', alice.project_obj)
bob.vnc_lib.virtual_network_create(vn2)
self.assertTrue(False, '*** Created virtual network ... test failed!')
except PermissionDenied as e:
self.assertTrue(True, 'Failed to create VN ... Test passed!')
vn = vnc_read_obj(alice.vnc_lib, 'virtual-network', name = vn_fq_name)
logger.info('')
logger.info( '######### READ (SHARED WITH ANOTHER TENANT) ############')
logger.info( 'Disable share in virtual networks for others')
set_perms(vn, share = [], global_access = PERMS_NONE)
alice.vnc_lib.virtual_network_update(vn)
logger.info( 'Reading VN as bob ... should fail')
try:
net_obj = bob.vnc_lib.virtual_network_read(id=vn.get_uuid())
self.assertTrue(False, '*** Succeeded in reading VN. Test failed!')
except PermissionDenied as e:
self.assertTrue(True, 'Failed to read VN ... Test passed!')
logger.info( 'Enable share in virtual network for bob project')
set_perms(vn, share = [(bob.project_uuid, PERMS_R)])
alice.vnc_lib.virtual_network_update(vn)
logger.info( 'Reading VN as bob ... should succeed')
try:
net_obj = bob.vnc_lib.virtual_network_read(id=vn.get_uuid())
self.assertTrue(True, 'Succeeded in reading VN. Test passed!')
except PermissionDenied as e:
self.assertTrue(False, '*** Failed to read VN ... Test failed!')
logger.info('')
logger.info( '########### READ (DISABLE READ SHARING) ##################')
logger.info( 'Disable share in virtual networks for others')
set_perms(vn, share = [])
alice.vnc_lib.virtual_network_update(vn)
logger.info( 'Reading VN as bob ... should fail')
try:
net_obj = bob.vnc_lib.virtual_network_read(id=vn.get_uuid())
self.assertTrue(False, 'Succeeded in reading VN. Test failed!')
except PermissionDenied as e:
self.assertTrue(True, '*** Failed to read VN ... Test passed!')
logger.info('')
logger.info( '########### READ (GLOBALLY SHARED) ##################')
logger.info( 'Enable virtual networks in alice project for global sharing (read only)')
set_perms(vn, share = [], global_access = PERMS_R)
alice.vnc_lib.virtual_network_update(vn)
logger.info( 'Reading VN as bob ... should succeed')
try:
net_obj = bob.vnc_lib.virtual_network_read(id=vn.get_uuid())
self.assertTrue(True, 'Succeeded in reading VN. Test passed!')
except PermissionDenied as e:
self.assertTrue(False, '*** Failed to read VN ... Test failed!')
logger.info( '########### WRITE (GLOBALLY SHARED) ##################')
logger.info( 'Writing shared VN as bob ... should fail')
try:
vn.display_name = "foobar"
bob.vnc_lib.virtual_network_update(vn)
self.assertTrue(False, '*** Succeeded in updating VN. Test failed!!')
except PermissionDenied as e:
self.assertTrue(True, 'Failed to update VN ... Test passed!')
logger.info('')
logger.info( 'Enable virtual networks in alice project for global sharing (read, write)')
logger.info( 'Writing shared VN as bob ... should succeed')
# important: read VN afresh to overwrite display_name update pending status
vn = vnc_read_obj(self.alice.vnc_lib, 'virtual-network', name = vn_fq_name)
set_perms(vn, global_access = PERMS_RW)
alice.vnc_lib.virtual_network_update(vn)
try:
vn.display_name = "foobar"
bob.vnc_lib.virtual_network_update(vn)
self.assertTrue(True, 'Succeeded in updating VN. Test passed!')
except PermissionDenied as e:
self.assertTrue(False, '*** Failed to update VN ... Test failed!!')
logger.info( '')
logger.info( '########################### COLLECTIONS #################')
logger.info( 'User should be able to see VN in own project and any shared')
logger.info( 'alice: get virtual network collection ... should fail')
try:
x = alice.vnc_lib.virtual_networks_list(parent_id = alice.project_uuid)
self.assertTrue(False,
'*** Read VN collection without list permission ... test failed!')
except PermissionDenied as e:
self.assertTrue(True, 'Failed to read VN collection ... test passed')
# allow permission to read virtual-network collection
for user in [alice, bob]:
logger.info( "%s: project %s to allow collection access to role %s" % \
(user.name, user.project, user.role))
# note that collection API is set for create operation
vnc_fix_api_access_list(admin.vnc_lib, user.project_obj,
rule_str = 'virtual-networks %s:CR' % user.role)
# create one more VN in alice project to differentiate from what bob sees
vn2 = VirtualNetwork('second-vn', alice.project_obj)
alice.vnc_lib.virtual_network_create(vn2)
logger.info( 'Alice: created additional VN %s in her project' % vn2.get_fq_name())
logger.info( 'Alice: network list')
x = alice.vnc_lib.virtual_networks_list(parent_id = alice.project_uuid)
for item in x['virtual-networks']:
logger.info( ' %s: %s' % (item['uuid'], item['fq_name']))
expected = set(['alice-vn', 'second-vn'])
received = set([item['fq_name'][-1] for item in x['virtual-networks']])
self.assertEquals(expected, received)
logger.info('')
logger.info( 'Bob: network list')
y = bob.vnc_lib.virtual_networks_list(parent_id = bob.project_uuid)
for item in y['virtual-networks']:
logger.info( ' %s: %s' % (item['uuid'], item['fq_name']))
# need changes in auto code generation for lists
expected = set(['alice-vn'])
received = set([item['fq_name'][-1] for item in y['virtual-networks']])
self.assertEquals(expected, received)
def test_check_obj_perms_api(self):
logger.info('')
logger.info( '########### CHECK OBJ PERMS API ##################')
alice = self.alice
bob = self.bob
admin = self.admin
# allow permission to create virtual-network
for user in self.users:
logger.info( "%s: project %s to allow full access to role %s" % \
(user.name, user.project, user.role))
# note that collection API is set for create operation
vnc_fix_api_access_list(self.admin.vnc_lib, user.project_obj,
rule_str = 'virtual-networks %s:CRUD' % user.role)
logger.info( '')
logger.info( 'alice: trying to create VN in her project')
vn = VirtualNetwork(self.vn_name, self.alice.project_obj)
try:
self.alice.vnc_lib.virtual_network_create(vn)
logger.info( 'Created virtual network %s ... test passed!' % vn.get_fq_name())
testfail = False
except PermissionDenied as e:
logger.info( 'Failed to create VN ... Test failed!')
testfail = True
self.assertThat(testfail, Equals(False))
ExpectedPerms = {'admin':'RWX', 'alice':'RWX', 'bob':''}
for user in self.users:
perms = user.check_perms(vn.get_uuid())
self.assertEquals(perms, ExpectedPerms[user.name])
logger.info( 'Enable share in virtual network for bob project')
vn_fq_name = [self.domain_name, alice.project, self.vn_name]
vn = vnc_read_obj(self.alice.vnc_lib, 'virtual-network', name = vn_fq_name)
set_perms(vn, share = [(bob.project_uuid, PERMS_R)])
alice.vnc_lib.virtual_network_update(vn)
ExpectedPerms = {'admin':'RWX', 'alice':'RWX', 'bob':'R'}
for user in self.users:
perms = user.check_perms(vn.get_uuid())
self.assertEquals(perms, ExpectedPerms[user.name])
logger.info('')
logger.info( '########### READ (DISABLE READ SHARING) ##################')
logger.info( 'Disable share in virtual networks for others')
set_perms(vn, share = [])
alice.vnc_lib.virtual_network_update(vn)
ExpectedPerms = {'admin':'RWX', 'alice':'RWX', 'bob':''}
for user in self.users:
perms = user.check_perms(vn.get_uuid())
self.assertEquals(perms, ExpectedPerms[user.name])
logger.info( 'Reading VN as bob ... should fail')
logger.info('')
logger.info( '########### READ (GLOBALLY SHARED) ##################')
logger.info( 'Enable virtual networks in alice project for global sharing (read only)')
set_perms(vn, share = [], global_access = PERMS_R)
alice.vnc_lib.virtual_network_update(vn)
ExpectedPerms = {'admin':'RWX', 'alice':'RWX', 'bob':'R'}
for user in self.users:
perms = user.check_perms(vn.get_uuid())
self.assertEquals(perms, ExpectedPerms[user.name])
logger.info( 'Reading VN as bob ... should fail')
logger.info('')
logger.info( '########### WRITE (GLOBALLY SHARED) ##################')
logger.info( 'Enable virtual networks in alice project for global sharing (read, write)')
set_perms(vn, global_access = PERMS_RW)
alice.vnc_lib.virtual_network_update(vn)
ExpectedPerms = {'admin':'RWX', 'alice':'RWX', 'bob':'RW'}
for user in self.users:
perms = user.check_perms(vn.get_uuid())
self.assertEquals(perms, ExpectedPerms[user.name])
    def tearDown(self):
        """Tear down the fake API server and reset shared fakes between tests."""
        # Stop the greenlet running the in-process API server first, so no
        # request is in flight while its backends are being shut down.
        self._api_svr_greenlet.kill()
        self._api_server._db_conn._msgbus.shutdown()
        # Reset class-level fakes so state does not leak into later tests.
        test_utils.FakeIfmapClient.reset()
        test_utils.CassandraCFs.reset()
        super(TestPermissions, self).tearDown()
    # end tearDown
|
{
"content_hash": "475f393c0e4f65f4ec2f2b29e7755282",
"timestamp": "",
"source": "github",
"line_count": 735,
"max_line_length": 133,
"avg_line_length": 40.83537414965986,
"alnum_prop": 0.5864929699473579,
"repo_name": "sajuptpm/contrail-controller",
"id": "bf425b649beefe7d91974e13257657c4d21dfb30",
"size": "30083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config/api-server/tests/test_perms2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "51767"
},
{
"name": "C++",
"bytes": "19050770"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Groff",
"bytes": "36777"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "5819"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Protocol Buffer",
"bytes": "6129"
},
{
"name": "Python",
"bytes": "4813021"
},
{
"name": "Shell",
"bytes": "81402"
},
{
"name": "Thrift",
"bytes": "40763"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
}
|
import hashlib
import json
import os
import re
import unittest
from typing import List
from urllib.error import HTTPError
from urllib.parse import parse_qsl, urlparse
from urllib.request import Request, urlopen
import responses
from auto_nag import logger
MOCKS_DIR = os.path.join(os.path.dirname(__file__), "tests/mocks")
class MockTestCase(unittest.TestCase):
    """
    Mock responses from any webserver (through requests).

    Replays a previously recorded response when a local mock file exists;
    otherwise performs the real request once and records it for future runs.
    """

    # URL prefixes intercepted by this test case; subclasses override.
    mock_urls: List[str] = []

    def setUp(self):
        # Register one catch-all GET callback per mocked URL prefix.
        for mock_url in self.mock_urls:
            url_re = re.compile(rf"^{mock_url}")
            responses.add_callback(
                responses.GET,
                url_re,
                callback=self._request_callback,
                content_type="application/json",
            )

    def _request_callback(self, request):
        """Serve `request` from a local mock file, recording it first if absent.

        Returns the (status, headers, body) tuple expected by `responses`.
        """
        logger.debug("Mock request %s %s", request.method, request.url)
        path = self._build_path(request.method, request.url)
        if os.path.exists(path):
            # Load local file. Explicit encoding: the recorded body was
            # decoded as UTF-8, so it must be re-read as UTF-8 regardless of
            # the platform's locale default.
            logger.info("Using mock file %s", path)
            with open(path, "r", encoding="utf-8") as file:
                response = json.load(file)
        else:
            # Build from actual request
            logger.info("Building mock file %s", path)
            response = self._real_request(request)
            # Save in local file for future use
            with open(path, "w", encoding="utf-8") as file:
                file.write(json.dumps(response))
        return (response["status"], response["headers"], response["body"])

    def _build_path(self, method, url):
        """Build a unique, filesystem-safe mock filename from method & url."""
        # Build directory from scheme/host and path segments of the request.
        out = urlparse(url)
        parts = [f"{out.scheme}_{out.hostname}"]
        parts += filter(None, out.path.split("/"))
        directory = os.path.join(MOCKS_DIR, *parts)
        # Build sorted query filename. 'date' parameters are dropped so a
        # re-run on a different day still hits the same mock file.
        query = sorted(parse_qsl(out.query))
        query = [f"""{k}={v.replace("/", "_")}""" for k, v in query if k != "date"]
        query_str = "_".join(query)
        # Use hashes to avoid too long names
        if len(query_str) > 150:
            hashed_query = hashlib.md5(query_str.encode("utf-8")).hexdigest()
            query_str = f"{query_str[0:100]}_{hashed_query}"
        filename = f"{method}_{query_str}.json"
        # exist_ok=True handles the concurrent-creation race the original
        # code caught (and merely logged) with a broad try/except.
        os.makedirs(directory, exist_ok=True)
        return os.path.join(directory, filename)

    def _real_request(self, request):
        """
        Do a real request towards the target to build a mockup, using low
        level urllib. Can't use requests: it's wrapped by unittest.mock.
        """
        # No gzip ! (mock bodies are stored as plain UTF-8 text)
        headers = {key.lower(): value for key, value in request.headers.items()}
        if "accept-encoding" in headers:
            del headers["accept-encoding"]
        real_req = Request(
            request.url, request.body, headers=headers, method=request.method
        )
        try:
            resp = urlopen(real_req)
        except HTTPError as error:
            # Record the failure too, so the error is replayed consistently.
            logger.error("HTTP Error saved for %s: %s", request.url, error)
            return {"status": error.code, "headers": {}, "body": ""}
        return {
            "status": resp.code,
            # TODO: fix cookie usage bug
            # 'headers': dict(resp.getheaders()),
            "headers": {},
            "body": resp.read().decode("utf-8"),
        }
|
{
"content_hash": "b70c7a2b062d85f4f5f2c257fb1f7605",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 86,
"avg_line_length": 31.887931034482758,
"alnum_prop": 0.5674506623411733,
"repo_name": "mozilla/relman-auto-nag",
"id": "75003140a4eafef22e0491b747d65f0c8c119908",
"size": "4010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auto_nag/auto_mock.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "74090"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "571790"
},
{
"name": "Shell",
"bytes": "11421"
}
],
"symlink_target": ""
}
|
"""
Integration tests for the Synapse Client for Python
To run all the tests : nosetests -vs tests
To run a single test suite: nosetests -vs tests/integration
To run a single test set : nosetests -vs tests/integration/integration_test_Entity.py
To run a single test : nosetests -vs tests/integration/integration_test_Entity.py:test_Entity
"""
from __future__ import unicode_literals
from __future__ import print_function
from builtins import str
import uuid
import os
import sys
import shutil
import six
from synapseclient import Entity, Project, Folder, File, Evaluation
import synapseclient
import synapseclient.utils as utils
def setup_module(module):
    """nose module-level setup: log in to Synapse and create a shared project.

    Stores the client (`syn`), the cleanup list (`_to_cleanup`) and the
    shared test project on the module object for all tests in this package.
    """
    print("Python version:", sys.version)
    syn = synapseclient.Synapse(debug=True, skip_checks=True)
    print("Testing against endpoints:")
    print("  " + syn.repoEndpoint)
    print("  " + syn.authEndpoint)
    print("  " + syn.fileHandleEndpoint)
    print("  " + syn.portalEndpoint + "\n")
    # Credentials come from the environment/config; see synapseclient docs.
    syn.login()
    module.syn = syn
    module._to_cleanup = []
    # Make one project for all the tests to use
    project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project)
    module.project = project
def teardown_module(module):
    """nose module-level teardown: delete everything scheduled for cleanup."""
    cleanup(module._to_cleanup)
def schedule_for_cleanup(item):
    """Schedule a file or Synapse Entity to be deleted during teardown."""
    # Appends to the module-level _to_cleanup list created by setup_module.
    globals()['_to_cleanup'].append(item)
def cleanup(items):
    """Clean up junk created during testing.

    Items are removed in reverse creation order. Synapse 404/403 errors are
    ignored (entity already gone, or not ours to delete); local files and
    directories are removed from disk.
    """
    for item in reversed(items):
        if isinstance(item, Entity) or utils.is_synapse_id(item) or hasattr(item, 'deleteURI'):
            try:
                syn.delete(item)
            except Exception as ex:
                if hasattr(ex, 'response') and ex.response.status_code in [404, 403]:
                    pass
                else:
                    print("Error cleaning up entity: " + str(ex))
        elif isinstance(item, six.string_types):
            if os.path.exists(item):
                try:
                    if os.path.isdir(item):
                        shutil.rmtree(item)
                    else: # Assume that remove will work on anything besides folders
                        os.remove(item)
                except Exception as ex:
                    print(ex)
        else:
            sys.stderr.write('Don\'t know how to clean: %s' % str(item))
|
{
"content_hash": "c1de0f2e0ac1d1a7421af4c8296048fa",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 98,
"avg_line_length": 31.76,
"alnum_prop": 0.6263643996641478,
"repo_name": "kkdang/synapsePythonClient",
"id": "2992e21ca81841119dc8aeddc09d45cd378f48b5",
"size": "2406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "659128"
}
],
"symlink_target": ""
}
|
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
# Individual files that must never be touched even though their extension
# matches INCLUDE below (generated sources, vendored code, package inits).
EXCLUDE = [
    # auto generated:
    'src/qt/bitcoinstrings.cpp',
    'src/chainparamsseeds.h',
    # other external copyrights:
    'src/tinyformat.h',
    'test/functional/test_framework/bignum.py',
    # python init:
    '*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join(fnmatch.translate(pattern)
                                       for pattern in EXCLUDE))
EXCLUDE_DIRS = [
    # git subtrees
    "src/crypto/ctaes/",
    "src/leveldb/",
    "src/secp256k1/",
    "src/univalue/",
]

# Source-file extensions this tool manages.
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.mm', '*.py']
INCLUDE_COMPILED = re.compile('|'.join(fnmatch.translate(pattern)
                                       for pattern in INCLUDE))


def applies_to_file(filename):
    """Return True when `filename` should be examined for copyright headers.

    A file qualifies when it is not under an excluded (vendored) directory,
    matches no EXCLUDE pattern, and matches one of the INCLUDE patterns.
    """
    if any(filename.startswith(excluded) for excluded in EXCLUDE_DIRS):
        return False
    if EXCLUDE_COMPILED.match(filename) is not None:
        return False
    return INCLUDE_COMPILED.match(filename) is not None
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
# git commands used to enumerate tracked files, pre-split into argv lists.
GIT_LS_CMD = 'git ls-files --full-name'.split(' ')
GIT_TOPLEVEL_CMD = 'git rev-parse --show-toplevel'.split(' ')

def call_git_ls(base_directory):
    """Return repo-relative paths of all git-tracked files under base_directory."""
    out = subprocess.check_output([*GIT_LS_CMD, base_directory])
    # Drop the trailing empty string produced by the final newline.
    return [f for f in out.decode("utf-8").split('\n') if f != '']

def call_git_toplevel():
    "Returns the absolute path to the project root"
    return subprocess.check_output(GIT_TOPLEVEL_CMD).strip().decode("utf-8")

def get_filenames_to_examine(base_directory):
    "Returns an array of absolute paths to any project files in the base_directory that pass the include/exclude filters"
    root = call_git_toplevel()
    filenames = call_git_ls(base_directory)
    # Filter on repo-relative names (the filters match path prefixes),
    # then return absolute paths for the file-reading steps.
    return sorted([os.path.join(root, filename) for filename in filenames if
                   applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
# Building blocks for the copyright-notice regexes. Values are identical to
# the historical ones; only the construction style differs.
COPYRIGHT_WITH_C = r'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = f'({COPYRIGHT_WITH_C}|{COPYRIGHT_WITHOUT_C})'

YEAR = "20[0-9][0-9]"
YEAR_RANGE = f'({YEAR})(-{YEAR})?'
YEAR_LIST = f'({YEAR})(, {YEAR})+'
ANY_YEAR_STYLE = f'({YEAR_RANGE}|{YEAR_LIST})'
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = f"{ANY_COPYRIGHT_STYLE} {ANY_YEAR_STYLE}"

# Matches any copyright-looking notice, with or without "(c)", with either
# a year range or a comma-separated year list.
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)


def compile_copyright_regex(copyright_style, year_style, name):
    """Compile a full-notice regex for one holder.

    Accepts an optional comma after the years and an optional trailing ' *'
    (block-comment continuation) before the newline.
    """
    return re.compile(rf'{copyright_style} {year_style},? {name}( +\*)?\n')
# Copyright holders expected to appear in this repository's headers; any
# holder not listed here shows up in the report as "unexpected".
EXPECTED_HOLDER_NAMES = [
    r"Satoshi Nakamoto",
    r"The Bitcoin Core developers",
    r"BitPay Inc\.",
    r"University of Illinois at Urbana-Champaign\.",
    r"Pieter Wuille",
    r"Wladimir J\. van der Laan",
    r"Jeff Garzik",
    r"Jan-Klaas Kollhof",
    r"ArtForz -- public domain half-a-node",
    r"Intel Corporation ?",
    r"The Zcash developers",
    r"Jeremy Rubin",
]

# Per-holder pre-compiled regexes, one dict per notice style.
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
    DOMINANT_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
    YEAR_LIST_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
    WITHOUT_C_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
                                holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
    """Count every copyright-looking notice in `contents`, any holder."""
    return sum(1 for _ in ANY_COPYRIGHT_COMPILED.finditer(contents))

def file_has_dominant_style_copyright_for_holder(contents, holder_name):
    """True when `contents` carries a '(c)' + year-range notice for the holder."""
    return DOMINANT_STYLE_COMPILED[holder_name].search(contents) is not None

def file_has_year_list_style_copyright_for_holder(contents, holder_name):
    """True when `contents` carries a '(c)' + year-list notice for the holder."""
    return YEAR_LIST_STYLE_COMPILED[holder_name].search(contents) is not None

def file_has_without_c_style_copyright_for_holder(contents, holder_name):
    """True when `contents` carries a notice without '(c)' for the holder."""
    return WITHOUT_C_STYLE_COMPILED[holder_name].search(contents) is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    """Return the full text of `filename`, decoded as UTF-8.

    Uses a context manager so the file handle is closed deterministically;
    the original left closing to garbage collection.
    """
    with open(filename, 'r', encoding="utf8") as file_obj:
        return file_obj.read()
def gather_file_info(filename):
    """Collect per-file copyright statistics used by print_report.

    Returns a dict with the filename, raw contents, the total count of
    copyright-looking notices, how many holders matched any known style,
    and per-holder booleans for each of the three notice styles.
    """
    info = {}
    info['filename'] = filename
    c = read_file(filename)
    info['contents'] = c
    info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
    info['classified_copyrights'] = 0
    info['dominant_style'] = {}
    info['year_list_style'] = {}
    info['without_c_style'] = {}
    for holder_name in EXPECTED_HOLDER_NAMES:
        has_dominant_style = (
            file_has_dominant_style_copyright_for_holder(c, holder_name))
        has_year_list_style = (
            file_has_year_list_style_copyright_for_holder(c, holder_name))
        has_without_c_style = (
            file_has_without_c_style_copyright_for_holder(c, holder_name))
        info['dominant_style'][holder_name] = has_dominant_style
        info['year_list_style'][holder_name] = has_year_list_style
        info['without_c_style'][holder_name] = has_without_c_style
        # Count at most one classification per holder, even if several
        # styles matched for that holder.
        if has_dominant_style or has_year_list_style or has_without_c_style:
            info['classified_copyrights'] = info['classified_copyrights'] + 1
    return info
################################################################################
# report execution
################################################################################
# 79-dash horizontal rule used between report sections (same value the old
# "'-'.join 80 empty strings" trick produced).
SEPARATOR = '-' * 79

def print_filenames(filenames, verbose):
    """Print one tab-indented line per filename, but only in verbose mode."""
    if verbose:
        for name in filenames:
            print("\t%s" % name)
def print_report(file_infos, verbose):
    """Print the full copyright report for the gathered `file_infos`.

    Sections: files examined, histogram of notice counts per file, then per
    notice style a per-holder breakdown, then files whose notices could not
    be attributed to an expected holder. `verbose` adds filename listings.
    """
    print(SEPARATOR)
    examined = [i['filename'] for i in file_infos]
    print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
          len(examined))
    print_filenames(examined, verbose)
    print(SEPARATOR)
    print('')
    # Histogram of how many copyright-looking notices each file carries.
    zero_copyrights = [i['filename'] for i in file_infos if
                       i['all_copyrights'] == 0]
    print("%4d with zero copyrights" % len(zero_copyrights))
    print_filenames(zero_copyrights, verbose)
    one_copyright = [i['filename'] for i in file_infos if
                     i['all_copyrights'] == 1]
    print("%4d with one copyright" % len(one_copyright))
    print_filenames(one_copyright, verbose)
    two_copyrights = [i['filename'] for i in file_infos if
                      i['all_copyrights'] == 2]
    print("%4d with two copyrights" % len(two_copyrights))
    print_filenames(two_copyrights, verbose)
    three_copyrights = [i['filename'] for i in file_infos if
                        i['all_copyrights'] == 3]
    print("%4d with three copyrights" % len(three_copyrights))
    print_filenames(three_copyrights, verbose)
    four_or_more_copyrights = [i['filename'] for i in file_infos if
                               i['all_copyrights'] >= 4]
    print("%4d with four or more copyrights" % len(four_or_more_copyrights))
    print_filenames(four_or_more_copyrights, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
          '"<year>" or "<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        dominant_style = [i['filename'] for i in file_infos if
                          i['dominant_style'][holder_name]]
        if len(dominant_style) > 0:
            print("%4d with '%s'" % (len(dominant_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(dominant_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
          '"<year1>, <year2>, ...":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        year_list_style = [i['filename'] for i in file_infos if
                           i['year_list_style'][holder_name]]
        if len(year_list_style) > 0:
            print("%4d with '%s'" % (len(year_list_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(year_list_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
          '"<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        without_c_style = [i['filename'] for i in file_infos if
                           i['without_c_style'][holder_name]]
        if len(without_c_style) > 0:
            print("%4d with '%s'" % (len(without_c_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(without_c_style, verbose)
    print('')
    print(SEPARATOR)
    # Files where at least one notice matched no expected holder/style.
    unclassified_copyrights = [i['filename'] for i in file_infos if
                               i['classified_copyrights'] < i['all_copyrights']]
    print("%d with unexpected copyright holder names" %
          len(unclassified_copyrights))
    print_filenames(unclassified_copyrights, verbose)
    print(SEPARATOR)
def exec_report(base_directory, verbose):
    """Gather copyright info for every eligible file under `base_directory`
    and print the report."""
    infos = [gather_file_info(name)
             for name in get_filenames_to_examine(base_directory)]
    print_report(infos, verbose)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""

def report_cmd(argv):
    """Validate arguments for the 'report' subcommand and run it.

    `argv` is the full sys.argv list:
    ['copyright_header.py', 'report', <base_directory>, ['verbose']].
    """
    # '< 3' instead of the original '== 2': a shorter argv would otherwise
    # raise IndexError below instead of printing usage.
    if len(argv) < 3:
        sys.exit(REPORT_USAGE)
    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad <base_directory>: %s" % base_directory)
    if len(argv) == 3:
        verbose = False
    elif argv[3] == 'verbose':
        verbose = True
    else:
        # Bug fix: report the offending argument argv[3]; the original
        # printed argv[2] (the base directory) in this message.
        sys.exit("*** unknown argument: %s" % argv[3])
    exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
# Template for listing author dates of every commit touching a file.
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"

def call_git_log(filename):
    """Return the author-date lines from 'git log' for `filename`."""
    out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
    return out.decode("utf-8").split('\n')

def get_git_change_years(filename):
    """Return the years (as strings) in which git recorded changes to `filename`.

    Falls back to the current year for files with no git history yet.
    """
    # Bug fix: an empty git log decodes to [''] after split('\n'), so the
    # original 'len(...) == 0' guard could never fire; filter blank lines
    # first so the fallback is actually reachable. The fallback year is
    # returned as a string, consistent with the parsed-from-log years.
    git_log_lines = [line for line in call_git_log(filename) if line]
    if not git_log_lines:
        return [str(datetime.date.today().year)]
    # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
    return [line.split(' ')[0].split('-')[0] for line in git_log_lines]

def get_most_recent_git_change_year(filename):
    """Return the latest year in which git recorded a change to `filename`."""
    return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
    """Return all lines of `filename` (UTF-8), each keeping its newline.

    Uses `with` so the handle is closed even if readlines() raises; the
    original open/close pair leaked the handle on error.
    """
    with open(filename, 'r', encoding="utf8") as f:
        return f.readlines()

def write_file_lines(filename, file_lines):
    """Write `file_lines` (already newline-terminated) to `filename` as UTF-8."""
    with open(filename, 'w', encoding="utf8") as f:
        f.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################
# Matches exactly the core-developers notice this tool is allowed to update.
COPYRIGHT = r'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Bitcoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))


def get_updatable_copyright_line(file_lines):
    """Return (index, line) of the first updatable core copyright line.

    Returns (None, None) when no line matches UPDATEABLE_LINE_COMPILED.
    """
    for index, line in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(line) is not None:
            return index, line
    return None, None
def parse_year_range(year_range):
    """Split '<start>' or '<start>-<end>' into a (start, end) tuple.

    A single year yields (year, year).
    """
    parts = year_range.split('-')
    if len(parts) == 1:
        return parts[0], parts[0]
    return parts[0], parts[1]

def year_range_to_str(start_year, end_year):
    """Render (start, end) back to '<start>' or '<start>-<end>'."""
    return start_year if start_year == end_year else "%s-%s" % (start_year, end_year)

def create_updated_copyright_line(line, last_git_change_year):
    """Return `line` with its copyright end year extended to `last_git_change_year`.

    Whatever precedes the 'Copyright (c) ' marker (the comment leader, which
    varies from file to file) is preserved unchanged. The line is returned
    as-is when its end year already equals `last_git_change_year`.
    """
    marker = 'Copyright (c) '
    pieces = line.split(marker)
    leader = pieces[0]
    remainder = pieces[1]
    year_range, *holder_words = remainder.split(' ')
    start_year, end_year = parse_year_range(year_range)
    if end_year == last_git_change_year:
        return line
    new_range = year_range_to_str(start_year, last_git_change_year)
    return leader + marker + new_range + ' ' + ' '.join(holder_words)
def update_updatable_copyright(filename):
    """Bring `filename`'s core copyright end year up to its last git change year.

    Prints one status line describing what (if anything) changed; the file
    is only rewritten when the year range actually needed updating.
    """
    file_lines = read_file_lines(filename)
    index, line = get_updatable_copyright_line(file_lines)
    if not line:
        print_file_action_message(filename, "No updatable copyright.")
        return
    last_git_change_year = get_most_recent_git_change_year(filename)
    new_line = create_updated_copyright_line(line, last_git_change_year)
    if line == new_line:
        print_file_action_message(filename, "Copyright up-to-date.")
        return
    # Replace only the single copyright line, then write the file back.
    file_lines[index] = new_line
    write_file_lines(filename, file_lines)
    print_file_action_message(filename,
                              "Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
    """Run update_updatable_copyright over every eligible file in base_directory."""
    for filename in get_filenames_to_examine(base_directory):
        update_updatable_copyright(filename)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Bitcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
"""

def print_file_action_message(filename, action):
    """Print one aligned 'filename  action' status line."""
    print(f"{filename:<52} {action}")

def update_cmd(argv):
    """Run the 'update' subcommand; `argv` is the full sys.argv list."""
    if len(argv) != 3:
        sys.exit(UPDATE_USAGE)
    directory = argv[2]
    if os.path.exists(directory):
        exec_update_header_year(directory)
    else:
        sys.exit("*** bad base_directory: %s" % directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
    """Split a header template into newline-terminated lines.

    The template starts and ends with a newline; its first real line holds a
    ``%s`` placeholder that receives the rendered year range.
    """
    template_lines = header.split('\n')[1:-1]
    template_lines[0] = template_lines[0] % year_range_to_str(start_year, end_year)
    return ['%s\n' % text for text in template_lines]

CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''

def get_cpp_header_lines_to_insert(start_year, end_year):
    """Return the C++ header lines reversed, ready for repeated insert(0, ...)."""
    return reversed(get_header_lines(CPP_HEADER, start_year, end_year))

PYTHON_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''

def get_python_header_lines_to_insert(start_year, end_year):
    """Return the Python header lines reversed, ready for repeated insert."""
    return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
    """Return (earliest, latest) year in which git recorded changes to `filename`."""
    ordered = sorted(get_git_change_years(filename))
    return ordered[0], ordered[-1]
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
    """True when an updatable core-developers copyright line is already present."""
    return get_updatable_copyright_line(file_lines)[0] is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
    """True when the file's first line is a '#!' interpreter line.

    Matches the historical behavior exactly: a bare two-character '#!' line
    (no newline, no interpreter) does not count.
    """
    return (len(file_lines) > 0
            and len(file_lines[0]) > 2
            and file_lines[0].startswith('#!'))
def insert_python_header(filename, file_lines, start_year, end_year):
    """Insert the Python-style header, below a '#!' line when one is present."""
    if file_has_hashbang(file_lines):
        insert_idx = 1
    else:
        insert_idx = 0
    # The header lines come pre-reversed, so repeatedly inserting at a fixed
    # index leaves them in the correct top-to-bottom order.
    header_lines = get_python_header_lines_to_insert(start_year, end_year)
    for line in header_lines:
        file_lines.insert(insert_idx, line)
    write_file_lines(filename, file_lines)

def insert_cpp_header(filename, file_lines, start_year, end_year):
    """Insert the C++-style header at the very top of the file."""
    # Pre-reversed lines + insert(0, ...) yields correct final ordering.
    header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
    for line in header_lines:
        file_lines.insert(0, line)
    write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
    """Insert a new core copyright header into `filename`.

    `style` is 'python' or 'cpp'. The year range comes from the file's git
    history. Exits when the file already carries a core copyright.
    """
    file_lines = read_file_lines(filename)
    if file_already_has_core_copyright(file_lines):
        sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
                 % (filename))
    start_year, end_year = get_git_change_year_range(filename)
    if style == 'python':
        insert_python_header(filename, file_lines, start_year, end_year)
    else:
        insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Bitcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Bitcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the bitcoin repository.
"""

def insert_cmd(argv):
    """Run the 'insert' subcommand; `argv` is the full sys.argv list."""
    if len(argv) != 3:
        sys.exit(INSERT_USAGE)
    filename = argv[2]
    if not os.path.isfile(filename):
        sys.exit("*** bad filename: %s" % filename)
    _, extension = os.path.splitext(filename)
    if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
        sys.exit("*** cannot insert for file extension %s" % extension)
    style = 'python' if extension == '.py' else 'cpp'
    exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Bitcoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""

# Valid first CLI argument; each maps to a *_cmd handler above.
SUBCOMMANDS = ['report', 'update', 'insert']

if __name__ == "__main__":
    # Dispatch on the subcommand name; print usage for anything unrecognized.
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    if subcommand not in SUBCOMMANDS:
        sys.exit(USAGE)
    if subcommand == 'report':
        report_cmd(sys.argv)
    elif subcommand == 'update':
        update_cmd(sys.argv)
    elif subcommand == 'insert':
        insert_cmd(sys.argv)
|
{
"content_hash": "0ad8ee606fa5f35c6db8f638452e5e36",
"timestamp": "",
"source": "github",
"line_count": 594,
"max_line_length": 121,
"avg_line_length": 36.611111111111114,
"alnum_prop": 0.5675725387409758,
"repo_name": "FeatherCoin/Feathercoin",
"id": "67e77bc63de3205bdab3791b10a12acc9b46cd0e",
"size": "21962",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "contrib/devtools/copyright_header.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "928577"
},
{
"name": "C++",
"bytes": "6575402"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "207328"
},
{
"name": "Makefile",
"bytes": "122185"
},
{
"name": "Objective-C++",
"bytes": "5495"
},
{
"name": "Python",
"bytes": "1651347"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "30188"
},
{
"name": "Scheme",
"bytes": "6045"
},
{
"name": "Shell",
"bytes": "130582"
}
],
"symlink_target": ""
}
|
from .campos import CampoData
from .campos import CampoFixo
from .campos import CampoRegex
from .erros import CampoError
from .erros import CampoInexistenteError
class Registro(object):
    """
    Abstract base class for manipulating SPED records (registros).
    >>> class RegistroTest(Registro):
    ...     campos = [CampoFixo(1, 'REG', 'TEST'),
    ...               CampoData(2, 'DT_INI'),
    ...               CampoData(3, 'DT_FIM'),
    ...               CampoRegex(4, 'RETIFICADORA', obrigatorio=True, regex='[SN]'),]
    >>> line = '|ERRO|01012015||N|'
    >>> r = RegistroTest(line)
    Traceback (most recent call last):
        ...
    sped.erros.CampoError: RegistroTest -> REG
    >>> line = '|TEST|01012015||N|'
    >>> r = RegistroTest(line)
    >>> r.REG
    'TEST'
    >>> r.REG = '0000'
    Traceback (most recent call last):
        ...
    sped.erros.CampoFixoError: RegistroTest -> REG
    >>> r.DT_INI
    datetime.date(2015, 1, 1)
    >>> r.DT_FIM
    >>> from datetime import date
    >>> r.DT_INI = date(2014, 2, 1)
    >>> r.DT_INI
    datetime.date(2014, 2, 1)
    >>> r.DT_INI = '01012014'
    Traceback (most recent call last):
        ...
    sped.erros.FormatoInvalidoError: RegistroTest -> DT_INI
    >>> r.DT_INI = ''
    >>> r.DT_INI
    >>> r.DT_INI = None
    >>> r.DT_INI
    >>> r.CAMPO_INEXISTENTE = ''
    Traceback (most recent call last):
        ...
    sped.erros.CampoInexistenteError: RegistroTest -> CAMPO_INEXISTENTE
    >>> r.CAMPO_INEXISTENTE
    Traceback (most recent call last):
        ...
    sped.erros.CampoInexistenteError: RegistroTest -> CAMPO_INEXISTENTE
    >>> r.RETIFICADORA
    'N'
    >>> r.RETIFICADORA='S'
    >>> r.RETIFICADORA
    'S'
    >>> r.RETIFICADORA='0'
    Traceback (most recent call last):
        ...
    sped.erros.FormatoInvalidoError: RegistroTest -> RETIFICADORA
    """
    # Field definitions; concrete subclasses override this with the record's
    # list of Campo objects. NOTE(review): the 'campos' property below
    # replaces this attribute in Registro's own namespace, so the empty list
    # effectively serves as a documented default for subclasses — confirm
    # intent before restructuring.
    campos = []
    def __init__(self, line=None):
        # Without an input line: one empty slot per campo plus the two empty
        # edge slots produced by the '|...|' layout, with fixed-value fields
        # pre-filled. With a line: split on '|' and verify fixed fields.
        if not line:
            self._valores = [''] * (len(self.campos) + 2)
            for c in self.campos:
                if isinstance(c, CampoFixo):
                    self._valores[c.indice] = c.valor
        else:
            self._valores = line.split('|')
            for c in self.campos:
                if isinstance(c, CampoFixo):
                    # Fixed fields must carry exactly their mandatory value.
                    if self._valores[c.indice] != c.valor:
                        raise CampoError(self, c.nome)
    @property
    def campos(self):
        # Read through the class so a subclass's field list wins.
        return self.__class__.campos
    @property
    def valores(self):
        # Raw list of string values, aligned with campo indices.
        return self._valores
    def __getitem__(self, key):
        # Accept either a campo index or a campo name as the key.
        campo = ([c for c in self.campos if c.indice == key or c.nome == key] or [None])[0]
        if not campo:
            raise CampoInexistenteError(self, key)
        return campo.get(self)
    def __setitem__(self, key, value):
        campo = ([c for c in self.campos if c.indice == key or c.nome == key] or [None])[0]
        if not campo:
            raise CampoInexistenteError(self, key)
        campo.set(self, value)
    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails: resolve the name
        # as a campo and delegate parsing/validation to the Campo object.
        campo = ([c for c in self.campos if c.nome == name] or [None])[0]
        if not campo:
            raise CampoInexistenteError(self, name)
        return campo.get(self)
    def __setattr__(self, name, value):
        # Underscore-prefixed attributes (e.g. _valores) are stored normally;
        # every other assignment must target a declared campo.
        if name.startswith('_'):
            super(Registro, self).__setattr__(name, value)
            return
        campo = ([c for c in self.campos if c.nome == name] or [None])[0]
        if not campo:
            raise CampoInexistenteError(self, name)
        campo.set(self, value)
    def as_line(self):
        # Serialize back to the '|'-separated SPED line format.
        return u'|'.join(self._valores)
|
{
"content_hash": "def91a7132a52908a51ad66bdecc7a9c",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 91,
"avg_line_length": 31.236842105263158,
"alnum_prop": 0.558270148834597,
"repo_name": "odoo-brazil/python-sped",
"id": "b8074e45963337030d12f57c4de29828f1a27245",
"size": "3588",
"binary": false,
"copies": "1",
"ref": "refs/heads/python-2.7",
"path": "sped/registros.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "267400"
}
],
"symlink_target": ""
}
|
from model.contact import Contact
import re
class ContactsHelper:
    def __init__(self,app):
        # Keep a reference to the application fixture that owns the webdriver.
        self.app = app
    def create(self, contact):
        """Create a new contact via the 'add new' form and return home."""
        wd = self.app.wd
        self.app.open_home_page()
        # init contact addition
        wd.find_element_by_link_text("add new").click()
        # fill contact form
        self.fill_form(contact)
        # submit contact
        wd.find_element_by_name("submit").click()
        self.app.return_to_home_page()
        # Presumably a cached contact list invalidated after the insert —
        # confirm against the corresponding getter.
        self.rows_cache = None
def fill_form(self, contact):
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nick)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(contact.title)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(contact.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home_tel)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(contact.mob_tel)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(contact.work_tel)
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(contact.fax)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email)
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").send_keys(contact.email2)
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").send_keys("\\9")
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").send_keys(contact.homepage)
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(contact.birthday)
def delete_first_contact(self):
wd = self.app.wd
self.delete_contact_by_index(0)
def delete_contact_by_index (self, index):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_index(index)
# submit deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
self.rows_cache = None
def delete_contact_by_id (self, id):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_id(id)
# submit deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
self.rows_cache = None
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def select_first_contact(self):
wd = self.app.wd
wd.find_elements_by_name("selected[]").click()
def modify_first_contact (self):
wd = self.app.wd
self.modify_contact_by_index (0)
def modify_contact_by_index(self, index, contact):
wd = self.app.wd
self.app.open_home_page()
wd.find_elements_by_xpath("//img[@title='Edit']")[index].click()
self.fill_form(contact)
wd.find_element_by_xpath("//input[@value='Update']").click()
self.app.return_to_home_page()
self.rows_cache = None
def count(self):
wd = self.app.wd
self.app.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
rows_cache = None
def get_contact_list(self):
if self.rows_cache is None:
wd = self.app.wd
self.app.open_home_page()
self.rows_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
firstname = cells[2].text
lastname = cells[1].text
address = cells[4].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
all_phones = cells[5].text
all_emails = cells[4].text
self.rows_cache.append(Contact(firstname=firstname, lastname=lastname, id=id, address=address, all_phones_from_home_page=all_phones,all_emails_from_home_page=all_emails ))
return list(self.rows_cache)
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index (index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
home_tel = wd.find_element_by_name("home").get_attribute("value")
work_tel = wd.find_element_by_name("work").get_attribute("value")
mob_tel = wd.find_element_by_name("mobile").get_attribute("value")
secondary_tel = wd.find_element_by_name("phone2").get_attribute("value")
return Contact(firstname=firstname, lastname=lastname, id=id, home_tel=home_tel, work_tel=work_tel, mob_tel=mob_tel, secondary_tel=secondary_tel)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
home_tel = re.search("H:(.*)", text).group(1)
work_tel = re.search("W:(.*)", text).group(1)
mob_tel = re.search("M:(.*)", text).group(1)
secondary_tel = re.search("P:(.*)", text).group(1)
return Contact(home_tel=home_tel, work_tel=work_tel, mob_tel=mob_tel, secondary_tel=secondary_tel)
|
{
"content_hash": "b6f980d5b95eb53926eae14e31dc3fd9",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 187,
"avg_line_length": 42.241573033707866,
"alnum_prop": 0.6065966218912089,
"repo_name": "goeliv/python_training",
"id": "6f316d36296a1a85d11e440709d539cc7c211256",
"size": "7520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/contacts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33768"
}
],
"symlink_target": ""
}
|
'''
Copyright (C) 2014 Parrot SA
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Parrot nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
import os
import shutil
import re
from Common_HandlePrebuiltDep import *
from ARFuncs import *
def iOS_HandlePrebuiltDep(target, pb, forcedOutputDir=None, outputSuffixes=None, clean=False, debug=False):
    # Installs a prebuilt dependency ``pb`` for an iOS ``target``:
    # 'header_only' prebuilts get a Headers-only .framework assembled from
    # the install tree; 'framework' prebuilts are copied wholesale.
    #
    # NOTE: the strings below are formatted with ``% locals()``, so the
    # local variable names (lib, InstallDir, FrameworksDir, ...) are
    # load-bearing -- do NOT rename them.
    args = dict(locals())
    StartDumpArgs(**args)
    Common_HandlePrebuiltDep(target, pb, forcedOutputDir=forcedOutputDir, outputSuffixes=outputSuffixes)
    res = True
    if not pb.isAvailableForTarget(target):
        ARLog('Prebuilt library %(pb)s does not exists for target %(target)s' % locals())
    else:
        Type = pb.type
        if Type == 'header_only':
            if not clean:
                # Make fat library
                lib = pb
                InstallDir = ARPathFromHere('Targets/%(target)s/Install/' % locals())
                FrameworksDir = '%(InstallDir)s/Frameworks/' % locals()
                Framework = '%(FrameworksDir)s/%(lib)s.framework' % locals()
                FrameworkDbg = '%(FrameworksDir)s/%(lib)s_dbg.framework' % locals()
                OutputDir = '%(InstallDir)s/lib/' % locals()
                if not os.path.exists(OutputDir):
                    os.makedirs(OutputDir)
                # Create framework
                FinalFramework = Framework
                if debug:
                    FinalFramework = FrameworkDbg
                suffix = '_dbg' if debug else ''
                # NOTE(review): FrameworkLib is computed but never used here
                # (no binary exists for a header-only prebuilt) -- confirm.
                FrameworkLib = '%(FinalFramework)s/%(lib)s%(suffix)s' % locals()
                FrameworkHeaders = '%(FinalFramework)s/Headers/' % locals()
                ARDeleteIfExists(FinalFramework)
                os.makedirs(FinalFramework)
                # Headers-only framework: copy the install include tree in.
                shutil.copytree('%(InstallDir)s/include/%(lib)s' % locals(), FrameworkHeaders)
        elif Type == 'framework':
            lib = pb
            InstallDir = ARPathFromHere('Targets/%(target)s/Install/' % locals())
            FrameworksDir = '%(InstallDir)s/Frameworks/' % locals()
            Framework = '%(FrameworksDir)s/%(lib)s.framework' % locals()
            FrameworkDbg = '%(FrameworksDir)s/%(lib)s_dbg.framework' % locals()
            if not os.path.exists(FrameworksDir):
                os.makedirs(FrameworksDir)
            # Copy framework
            FinalFramework = Framework if not debug else FrameworkDbg
            suffix = '_dbg' if debug else ''
            # NOTE(review): ``prefix`` is computed but never used -- confirm.
            prefix = 'lib' if not lib.name.startswith('lib') else ''
            ARLog(str(locals()))
            ARCopyAndReplace(pb.path,FinalFramework, deletePrevious=True)
        else:
            ARLog('Do not know how to handle prebuilts of type %(Type)s in iOS' % locals())
            res = False
    return EndDumpArgs(res, **args)
|
{
"content_hash": "f564b4c701803ca982a42e8e722e07c4",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 107,
"avg_line_length": 46.82222222222222,
"alnum_prop": 0.6409587090650214,
"repo_name": "2016-Capstone/PythonController",
"id": "b671ebf4d926fc2fe80bd54cfcce7c028c0178b2",
"size": "4214",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "arsdk-xml/ARSDKBuildUtils/Utils/Python/iOS_HandlePrebuiltDep.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "79673"
},
{
"name": "Java",
"bytes": "4259"
},
{
"name": "M4",
"bytes": "4361"
},
{
"name": "Makefile",
"bytes": "8645"
},
{
"name": "Python",
"bytes": "706882"
},
{
"name": "Shell",
"bytes": "23045"
}
],
"symlink_target": ""
}
|
from datetime import date
from decimal import Decimal
from ssl import SSLError
from suds import WebFault
from suds.client import Client
from authorize.exceptions import AuthorizeConnectionError, \
AuthorizeInvalidError, AuthorizeResponseError
PROD_URL = 'https://api.authorize.net/soap/v1/Service.asmx?WSDL'
TEST_URL = 'https://apitest.authorize.net/soap/v1/Service.asmx?WSDL'
class RecurringAPI(object):
    """Client for Authorize.Net's ARB (Automated Recurring Billing) SOAP API.

    Wraps a lazily-created suds SOAP client and exposes create / update /
    delete operations for recurring subscription payments.
    """

    def __init__(self, login_id, transaction_key, debug=True, test=False):
        # NOTE(review): the ``test`` flag is accepted but never used; the
        # endpoint is selected from ``debug`` alone -- confirm intent before
        # changing, as flipping it would alter which gateway is hit.
        self.url = TEST_URL if debug else PROD_URL
        self.login_id = login_id
        self.transaction_key = transaction_key

    @property
    def client(self):
        # Lazy instantiation of SOAP client, which hits the WSDL url
        if not hasattr(self, '_client'):
            self._client = Client(self.url)
        return self._client

    @property
    def client_auth(self):
        # Cached merchant-credentials object sent with every API call.
        if not hasattr(self, '_client_auth'):
            self._client_auth = self.client.factory.create(
                'MerchantAuthenticationType')
            self._client_auth.name = self.login_id
            self._client_auth.transactionKey = self.transaction_key
        return self._client_auth

    def _make_call(self, service, *args):
        # Provides standard API call error handling: transport failures
        # become AuthorizeConnectionError, gateway-reported failures become
        # AuthorizeResponseError.
        method = getattr(self.client.service, service)
        try:
            response = method(self.client_auth, *args)
        except (WebFault, SSLError) as e:
            raise AuthorizeConnectionError(e)
        if response.resultCode != 'Ok':
            error = response.messages[0][0]
            raise AuthorizeResponseError('%s: %s' % (error.code, error.text))
        return response

    def create_subscription(self, credit_card, amount, start,
            days=None, months=None, occurrences=None, trial_amount=None,
            trial_occurrences=None):
        """
        Creates a recurring subscription payment on the CreditCard provided.
        Returns the new subscription's id.

        ``credit_card``
            The CreditCard instance to create the subscription for.
            Subscriptions require that you provide a first and last name with
            the credit card.

        ``amount``
            The amount to charge every occurrence, either as an int, float,
            or Decimal.

        ``start``
            The date to start the subscription, as a date object.

        ``days``
            Provide either the days or the months argument to indicate the
            interval at which the subscription should recur.

        ``months``
            Provide either the days or the months argument to indicate the
            interval at which the subscription should recur.

        ``occurrences``
            If provided, this is the number of times to charge the credit card
            before ending. If not provided, will last until canceled.

        ``trial_amount``
            If you want to have a trial period at a lower amount for this
            subscription, provide the amount. (Either both trial arguments
            should be provided, or neither.)

        ``trial_occurrences``
            If you want to have a trial period at a lower amount for this
            subscription, provide the number of occurrences the trial period
            should last for. (Either both trial arguments should be provided,
            or neither.)
        """
        subscription = self.client.factory.create('ARBSubscriptionType')

        # Add the basic amount and payment fields
        amount = Decimal(str(amount)).quantize(Decimal('0.01'))
        subscription.amount = str(amount)
        payment_type = self.client.factory.create('PaymentType')
        credit_card_type = self.client.factory.create('CreditCardType')
        credit_card_type.cardNumber = credit_card.card_number
        credit_card_type.expirationDate = '{0}-{1:0>2}'.format(
            credit_card.exp_year, credit_card.exp_month)
        credit_card_type.cardCode = credit_card.cvv
        payment_type.creditCard = credit_card_type
        subscription.payment = payment_type
        if not (credit_card.first_name and credit_card.last_name):
            raise AuthorizeInvalidError('Subscriptions require first name '
                'and last name to be provided with the credit card.')
        subscription.billTo.firstName = credit_card.first_name
        subscription.billTo.lastName = credit_card.last_name

        # Add the fields for the payment schedule; exactly one of
        # days/months must be given.
        if (days and months) or not (days or months):
            raise AuthorizeInvalidError('Please provide either the months or '
                'days argument to define the subscription interval.')
        if days:
            # BUG FIX: range checks previously used ``assert``, which is
            # stripped when Python runs with -O; validate explicitly so the
            # check always runs.
            try:
                days = int(days)
            except ValueError:
                raise AuthorizeInvalidError('The interval days must be an '
                    'integer value between 7 and 365.')
            if not (7 <= days <= 365):
                raise AuthorizeInvalidError('The interval days must be an '
                    'integer value between 7 and 365.')
            subscription.paymentSchedule.interval.unit = \
                self.client.factory.create('ARBSubscriptionUnitEnum').days
            subscription.paymentSchedule.interval.length = days
        elif months:
            try:
                months = int(months)
            except ValueError:
                raise AuthorizeInvalidError('The interval months must be an '
                    'integer value between 1 and 12.')
            if not (1 <= months <= 12):
                raise AuthorizeInvalidError('The interval months must be an '
                    'integer value between 1 and 12.')
            subscription.paymentSchedule.interval.unit = \
                self.client.factory.create('ARBSubscriptionUnitEnum').months
            subscription.paymentSchedule.interval.length = months
        if start < date.today():
            raise AuthorizeInvalidError('The start date for the subscription '
                'may not be in the past.')
        subscription.paymentSchedule.startDate = start.strftime('%Y-%m-%d')
        if occurrences is None:
            occurrences = 9999  # That's what they say to do in the docs
        subscription.paymentSchedule.totalOccurrences = occurrences

        # If a trial period has been specified, add those fields
        if trial_amount and trial_occurrences:
            subscription.paymentSchedule.trialOccurrences = trial_occurrences
            trial_amount = Decimal(str(trial_amount))
            trial_amount = trial_amount.quantize(Decimal('0.01'))
            subscription.trialAmount = str(trial_amount)
        elif trial_amount or trial_occurrences:
            raise AuthorizeInvalidError('To indicate a trial period, you '
                'must provide both a trial amount and occurrences.')

        # Make the API call to create the subscription
        response = self._make_call('ARBCreateSubscription', subscription)
        return response.subscriptionId

    def update_subscription(self, subscription_id, amount=None, start=None,
            occurrences=None, trial_amount=None, trial_occurrences=None):
        """
        Updates an existing recurring subscription payment. All fields to
        update are optional, and only the provided fields will be updated.
        Many of the fields have particular restrictions that must be followed,
        as noted below.

        ``subscription_id``
            The subscription ID returned from the original create_subscription
            call for the subscription you want to update.

        ``amount``
            The updated amount to charge every occurrence, either as an int,
            float, or Decimal.

        ``start``
            The updated date to start the subscription, as a date object. This
            may only be updated if no successful payments have been completed.

        ``occurrences``
            This updates the number of times to charge the credit card before
            ending.

        ``trial_amount``
            Updates the amount charged during the trial period. This may only
            be updated if you have not begun charging at the regular price.

        ``trial_occurrences``
            Updates the number of occurrences for the trial period. This may
            only be updated if you have not begun charging at the regular
            price.
        """
        subscription = self.client.factory.create('ARBSubscriptionType')

        # Add the basic subscription updates
        if amount:
            amount = Decimal(str(amount)).quantize(Decimal('0.01'))
            subscription.amount = str(amount)
        if start and start < date.today():
            raise AuthorizeInvalidError('The start date for the subscription '
                'may not be in the past.')
        if start:
            subscription.paymentSchedule.startDate = start.strftime('%Y-%m-%d')
        if occurrences:
            subscription.paymentSchedule.totalOccurrences = occurrences
        if trial_amount:
            trial_amount = Decimal(str(trial_amount))
            trial_amount = trial_amount.quantize(Decimal('0.01'))
            subscription.trialAmount = str(trial_amount)
        if trial_occurrences:
            subscription.paymentSchedule.trialOccurrences = trial_occurrences

        # Make the API call to update the subscription
        self._make_call('ARBUpdateSubscription', subscription_id,
            subscription)

    def delete_subscription(self, subscription_id):
        """
        Deletes an existing recurring subscription payment.

        ``subscription_id``
            The subscription ID returned from the original create_subscription
            call for the subscription you want to delete.
        """
        self._make_call('ARBCancelSubscription', subscription_id)
|
{
"content_hash": "d9ac541897026bc3a86969c275ad860f",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 43.49321266968326,
"alnum_prop": 0.63722430295464,
"repo_name": "jeffschenck/authorizesauce",
"id": "d18f397e1d40ee9dcf6080d14ffcf201f07738c7",
"size": "9612",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "authorize/apis/recurring.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105616"
}
],
"symlink_target": ""
}
|
# Account user name -- presumably the GitHub username matching the token
# below (left blank in this example config); confirm against the sync code.
USERNAME = ""
# These tokens can be generated here: https://github.com/settings/tokens/new
AUTH = "########################################"
# Directory/file names skipped when walking project folders.
IGNORE_FILES = [
    '.ipynb_checkpoints',
    '.gitignore'
]
|
{
"content_hash": "6ece02a922c2e2d37085815dd0a6534a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 76,
"avg_line_length": 25.5,
"alnum_prop": 0.5294117647058824,
"repo_name": "openconnectome/ndprojects",
"id": "ba5a72547a32c4465ed9ba03a4824e4e6a4b8831",
"size": "236",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "_code/config.example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "34593"
},
{
"name": "Python",
"bytes": "2526"
}
],
"symlink_target": ""
}
|
class InvalidRepositoryError(ValueError):
    """Raised when a value does not identify a valid repository."""
class InvalidContentFileEncoding(ValueError):
    """Raised when a content file's encoding cannot be handled."""
|
{
"content_hash": "8135961dd3256f5405c31a0ca68851b4",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 45,
"avg_line_length": 18,
"alnum_prop": 0.7962962962962963,
"repo_name": "muffins-on-dope/bakery",
"id": "bc530a32ae83ea81b679fe4992a1fc3eea7ad00a",
"size": "134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bakery/cookies/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "47835"
},
{
"name": "JavaScript",
"bytes": "1726"
},
{
"name": "Python",
"bytes": "72630"
},
{
"name": "Shell",
"bytes": "5096"
}
],
"symlink_target": ""
}
|
"""
Utilities for splitting source-code terms: tokenization, best-effort Unicode
coercion, camelCase/digit/punctuation word splitting, and stop-word removal.
"""
import string
import logging
logger = logging.getLogger('cfl.preprocessing')
def tokenize(s):
    """Split *s* on runs of whitespace and return the resulting tokens."""
    tokens = s.split()
    return tokens
def to_unicode(document, info=()):
    """Best-effort conversion of *document* to ``unicode`` (Python 2).

    Null bytes are replaced by spaces and the text is stripped.  Byte
    strings are tried against utf8, latin1 and ascii in turn; if every
    codec fails, the original (still byte) string is returned unchanged.

    ``info`` is extra context (an iterable of strings) joined into the
    debug log message on decode failures.

    BUG FIX: the default for ``info`` was a mutable list (``[]``), a
    classic shared-default anti-pattern; an empty tuple is equivalent
    here because ``info`` is only ever joined, never mutated.
    """
    document = document.replace('\x00', ' ')  # remove nulls
    document = document.strip()
    if not isinstance(document, unicode):
        for codec in ['utf8', 'latin1', 'ascii']:
            try:
                return unicode(document, encoding=codec)
            except UnicodeDecodeError as e:
                logger.debug('%s %s %s' % (codec, str(e), ' '.join(info)))
    # Already unicode, or undecodable by every codec: hand back as-is.
    return document
def split(iterator):
    """Split each token from *iterator* into sub-words (generator).

    camelCase and ALLCAPSWords are separated on case boundaries, digit
    runs become their own tokens, and every punctuation character is
    yielded individually.

    BUG FIX: when a token started with a lowercase letter the original
    executed ``word = word[-1]`` while ``word`` was still empty
    (``all()`` over an empty word is True), raising IndexError; the
    empty-word case is now handled explicitly.
    """
    for token in iterator:
        word = u''
        for char in token:
            if char.isupper() and all(map(lambda x: x.isupper(), word)):
                # keep building if word is currently all uppercase
                word += char
            elif char.islower() and all(map(lambda x: x.isupper(), word)):
                # stop building if word is currently all uppercase,
                # but be sure to take the first letter back
                if len(word) > 1:
                    yield word[:-1]
                word = word[-1] if word else u''
                word += char
            elif char.islower() and any(map(lambda x: x.islower(), word)):
                # keep building if the word has any lowercase
                # (word came from above case)
                word += char
            elif char.isdigit() and all(map(lambda x: x.isdigit(), word)):
                # keep building if all of the word is a digit so far
                word += char
            elif char in string.punctuation:
                if len(word) > 0:
                    yield word
                    word = u''
                # always yield punctuation as a single token
                yield char
            else:
                if len(word) > 0:
                    yield word
                word = char
        if len(word) > 0:
            yield word
def remove_stops(iterator, stopwords=set(), punctuation=True, digits=True,
                 whitespace=True):
    """Yield the tokens of *iterator* that are not stop words.

    ``stopwords`` is an iterable of words to drop; single punctuation,
    digit and whitespace characters (and the empty string) are also
    dropped unless disabled via the keyword flags.  Tokens that parse as
    integers are skipped as well.

    BUG FIX: the original mutated a caller-supplied set in place and --
    worse -- polluted the shared mutable default ``set()`` across calls,
    so e.g. ``punctuation=False`` stopped working after any call with
    defaults.  A private copy is now always taken.
    """
    stopwords = set(stopwords)
    if punctuation:
        stopwords.update(string.punctuation)
    if digits:
        stopwords.update(string.digits)
    if whitespace:
        stopwords.update(string.whitespace)
    stopwords.update([''])
    for word in filter(lambda x: x not in stopwords, iterator):
        try:
            # NOTE(review): int() raises before float() is reached, so
            # float-only strings like "3.5" are still yielded; behaviour
            # preserved from the original -- confirm intent.
            int(word)
            float(word)
        except ValueError:
            yield word
FOX_STOPS = set(
""" a about above across after again against all almost alone along already
also although always among an and another any anybody anyone anything
anywhere are area areas around as ask asked asking asks at away b back
backed backing backs be because become becomes became been before began
behind being beings best better between big both but by c came can cannot
case cases certain certainly clear clearly come could d did differ different
differently do does done down downed downing downs during e each early
either end ended ending ends enough even evenly ever every everybody
everyone everything everywhere f face faces fact facts far felt few find
finds first for four from full fully further furthered furthering furthers
g gave general generally get gets give given gives go going good goods got
great greater greatest group grouped grouping groups h had has have having
he her herself here high higher highest him himself his how however i if
important in interest interested interesting interests into is it its itself
j just k keep keeps kind knew know known knows l large largely last later
latest least less let lets like likely long longer longest m made make
making man many may me member members men might more most mostly mr mrs much
must my myself n necessary need needed needing needs never new newer newest
next no non not nobody noone nothing now nowhere number numbered numbering
numbers o of off often old older oldest on once one only open opened opening
opens or order ordered ordering orders other others our out over p part
parted parting parts per perhaps place places point pointed pointing points
possible present presented presenting presents problem problems put puts
q quite r rather really right room rooms s said same saw say says second
seconds see sees seem seemed seeming seems several shall she should show
showed showing shows side sides since small smaller smallest so some
somebody someone something somewhere state states still such sure t take
taken than that the their them then there therefore these they thing things
think thinks this those though thought thoughts three through thus to today
together too took toward turn turned turning turns two u under until up upon
us use uses used v very w want wanted wanting wants was way ways we well
wells went were what when where whether which while who whole whose why will
with within without work worked working works would x y year years yet you
young younger youngest your yours z """.split())
JAVA_RESERVED = set(
""" abstract assert boolean break byte case catch char class const continue
default do double else enum extends false final finally float for goto if
implements import instanceof int interface long native new null package
private protected public return short static strictfp super switch
synchronized this throw throws transient true try void volatile while """.split())
|
{
"content_hash": "634682fa2a0de6060f7a6ed7118f2e14",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 86,
"avg_line_length": 41.577777777777776,
"alnum_prop": 0.6652414038838411,
"repo_name": "cscorley/changeset-feature-location",
"id": "7da1ca1c1f6596da38665e4bef8e9fc7d9b4a36f",
"size": "5812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/preprocessing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Groff",
"bytes": "139"
},
{
"name": "Logos",
"bytes": "49621"
},
{
"name": "Makefile",
"bytes": "2201"
},
{
"name": "Python",
"bytes": "108113"
},
{
"name": "Shell",
"bytes": "666"
},
{
"name": "TeX",
"bytes": "616130"
}
],
"symlink_target": ""
}
|
from bottle import *
def print_log():
    """Return the full contents of the diary log file ``mydaily.log``.

    BUG FIX: the original opened the file without ever closing it,
    leaking the file handle; a ``with`` block now guarantees closure.
    """
    with open("mydaily.log") as log_file:
        return log_file.read()
@route('/text')
# NOTE(review): the handler name shadows the ``input`` builtin; renaming
# would change the module's public interface, so it is only flagged here.
def input():
    """Serve the single-line diary entry form (GET /text); the form
    POSTs back to the same URL."""
    return'''
    <form action="/text" method="post">
    <p>请输入单行日记: <input type="text" name="fname" /></p>
    <input type="submit" value="确认" />
    </form>
    '''
@route('/log')
def history():
    """Render the accumulated diary log (GET /log)."""
    contents = print_log()
    return template('{{history}}', history=contents)
if __name__ == '__main__':
    # Development server: bottle debug mode plus auto-reload on code
    # changes, bound to localhost:8080.
    debug(True)
    run(host='localhost', port=8080, reloader=True)
|
{
"content_hash": "6c401f87059c733c30ffeb95680443d8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 58,
"avg_line_length": 20.857142857142858,
"alnum_prop": 0.5582191780821918,
"repo_name": "yangshaoshun/OMOOC2py",
"id": "c110279d40472cda5996efae01288751d18868a4",
"size": "626",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "_src/om2py4w/4wex3/te.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "192"
},
{
"name": "HTML",
"bytes": "2362"
},
{
"name": "Python",
"bytes": "41130"
},
{
"name": "Smarty",
"bytes": "5248"
}
],
"symlink_target": ""
}
|
"""
Example of train a 2-layers Neural Network classifier on CIFAR-10 dataset
"""
import numpy as np
import sys
sys.path.append("../../deuNet/")
from deuNet.utils import np_utils
from deuNet.datasets import cifar10
from deuNet.models import NN
from deuNet.layers.core import AffineLayer, Dropout
from deuNet.layers.convolutional import Convolution2D,Flatten,MaxPooling2D
from deuNet.layers.batch_normalization import BatchNormalization
import pdb
np.random.seed(1984)  # fixed seed for reproducible runs
# Training hyper-parameters.
batch_size = 32
nb_classes = 10
nb_epoch = 100
learning_rate = 0.01
w_scale = 1e-2  # NOTE(review): defined but not passed anywhere below -- confirm
momentum = 0.9
lr_decay = 1e-7
nesterov = True
rho = 0.9
reg_W = 0.  # L2 weight-decay coefficient (0 = no regularization)
checkpoint_fn = '.trained_cifar10_cnn.h5'
# Load CIFAR-10; the test split doubles as the validation split here.
(train_X, train_y), (test_X, test_y) = cifar10.load_data()
valid_X,valid_y = test_X, test_y
# convert data_y to one-hot
train_y = np_utils.one_hot(train_y, nb_classes)
valid_y = np_utils.one_hot(valid_y, nb_classes)
test_y = np_utils.one_hot(test_y, nb_classes)
# Scale pixel values from [0, 255] into [0, 1].
train_X = train_X.astype("float32")
valid_X = valid_X.astype("float32")
test_X = test_X.astype("float32")
train_X /= 255
valid_X /= 255
test_X /= 255
# NN architecture
# Two (conv-conv-pool-dropout) stages, then a 512-unit hidden layer and a
# 10-way softmax classifier.
model = NN(checkpoint_fn)
model.add(Convolution2D(32,3,3,3, border_mode='full',
        init='glorot_uniform',activation='relu', reg_W=reg_W))
model.add(Convolution2D(32,32,3,3, border_mode='valid',
        init='glorot_uniform',activation='relu', reg_W=reg_W))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25, uncertainty=False))
model.add(Convolution2D(64,32,3,3, border_mode='full',
        init='glorot_uniform',activation='relu', reg_W=reg_W))
model.add(Convolution2D(64,64,3,3, border_mode='valid',
        init='glorot_uniform',activation='relu', reg_W=reg_W))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25,uncertainty=False))
model.add(Flatten())
model.add(AffineLayer(8*8*64, 512,activation='relu',reg_W=reg_W, init='glorot_uniform'))
model.add(Dropout(0.5, uncertainty=False))
model.add(AffineLayer(512, nb_classes,activation='softmax',reg_W=reg_W,init='glorot_uniform'))
# Compile NN
print 'Compile NN ...'
model.compile(optimizer='SGD', loss='categorical_crossentropy',
        reg_type='L2', learning_rate = learning_rate, momentum=momentum,
        lr_decay=lr_decay, nesterov=nesterov, rho=rho)
# Train NN
model.fit(train_X, train_y, valid_X, valid_y,
        batch_size=batch_size, nb_epoch=nb_epoch, verbose=True)
# Test NN
model.get_test_accuracy(test_X, test_y)
|
{
"content_hash": "533f8b907d785cfdedaf2029a5deb169",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 94,
"avg_line_length": 29.365853658536587,
"alnum_prop": 0.7196843853820598,
"repo_name": "shenxudeu/deuNet",
"id": "669e321b062f29cb19998029beac3c61de94394f",
"size": "2408",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demos/cifar_10_cnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "70698"
},
{
"name": "Python",
"bytes": "226064"
}
],
"symlink_target": ""
}
|
import logging
import os
import shutil
import subprocess
from collections import namedtuple
from os.path import basename
log = logging.getLogger(__name__)
class VCSVersion(object):
    """
    Represents a Version (tag or branch) in a VCS.

    This class should only be instantiated in BaseVCS subclasses.

    It can act as a context manager to temporarily switch to this tag (eg to
    build docs for this tag).
    """

    def __init__(self, repository, identifier, verbose_name):
        self.repository = repository        # owning BaseVCS instance
        self.identifier = identifier        # VCS-native id (hash, tag, ...)
        self.verbose_name = verbose_name    # human-readable label

    def __repr__(self):
        # BUG FIX: the original format string was missing the closing '>'.
        return "<VCSVersion: %s:%s>" % (self.repository.repo_url,
                                        self.verbose_name)
class VCSProject(namedtuple("VCSProject",
                            "name default_branch working_dir repo_url")):
    """Immutable carrier for the VCS-related settings of one project."""
class BaseCLI(object):
    """
    Helper class for CLI-heavy classes.

    Subclasses (or instances) must provide ``name`` and ``working_dir``
    attributes before ``run`` is invoked.
    """
    log_tmpl = u'VCS[{name}:{ident}]: {args}'

    def __call__(self, *args):
        # BUG FIX: the original forwarded the packed tuple itself
        # (``self.run(args)``), so ``run`` received one nested tuple and
        # Popen was handed a non-string argv element.  Unpack instead so
        # cli('git', 'pull') behaves like cli.run('git', 'pull').
        return self.run(*args)

    def run(self, *args):
        """
        Run *args* as a subprocess inside ``self.working_dir``.

        :param args: command and its arguments. See `subprocess` docs
        :returns: tuple of (returncode, stdout, stderr)
        """
        process = subprocess.Popen(args, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   cwd=self.working_dir, shell=False,
                                   env=self.env)
        try:
            log.info(self.log_tmpl.format(ident=basename(self.working_dir),
                                          name=self.name,
                                          args=' '.join(args)))
        except UnicodeDecodeError:
            # Logging is best-effort; never fail the command over it.
            pass
        stdout, stderr = process.communicate()
        try:
            log.info(self.log_tmpl.format(ident=basename(self.working_dir),
                                          name=self.name,
                                          args=stdout))
        except UnicodeDecodeError:
            # Logging is best-effort; never fail the command over it.
            pass
        return (process.returncode, stdout, stderr)

    @property
    def env(self):
        """A private copy of the current environment for the child process."""
        return os.environ.copy()
class BaseVCS(BaseCLI):
    """
    Base class for VCS backends, layered on :class:`BaseCLI`.

    Concrete backends flip ``supports_tags`` / ``supports_branches`` and
    implement the tag/branch/commit accessors as appropriate.
    """
    supports_tags = False  # Whether this VCS supports tags or not.
    supports_branches = False  # Whether this VCS supports branches or not.

    # -- General methods ---------------------------------------------------

    def __init__(self, project, version, **kwargs):
        self.name = project.name
        self.default_branch = project.default_branch
        self.working_dir = project.working_dir
        self.repo_url = project.repo_url

    def check_working_dir(self):
        """Create the working directory if it does not exist yet."""
        if not os.path.exists(self.working_dir):
            os.makedirs(self.working_dir)

    def make_clean_working_dir(self):
        """Ensure the working dir exists and is empty."""
        shutil.rmtree(self.working_dir, ignore_errors=True)
        self.check_working_dir()

    def update(self):
        """
        If self.working_dir is already a valid local copy of the repository,
        update the repository, else create a new local copy of the
        repository.
        """
        self.check_working_dir()

    # -- Tag / branch related methods --------------------------------------
    # Only meaningful when supports_tags and/or supports_branches is True.

    @property
    def tags(self):
        """
        Returns a list of VCSVersion objects. See VCSVersion for more
        information.
        """
        raise NotImplementedError

    @property
    def branches(self):
        """
        Returns a list of VCSVersion objects. See VCSVersion for more
        information.
        """
        raise NotImplementedError

    @property
    def commit(self):
        """
        Returns a string representing the current commit.
        """
        raise NotImplementedError

    def checkout(self, identifier=None):
        """
        Set the state to the given identifier.

        If identifier is None, checkout to the latest revision.

        The type and format of identifier may change from VCS to VCS, so
        each backend is responsible to understand it's identifiers.
        """
        self.check_working_dir()
|
{
"content_hash": "fa3ec98297fa8d26ef8260fd91739a95",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 79,
"avg_line_length": 30.566666666666666,
"alnum_prop": 0.5378407851690294,
"repo_name": "cgourlay/readthedocs.org",
"id": "fb3c7f7959bb2095728a1441bd02be91b9543395",
"size": "4585",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "readthedocs/vcs_support/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "65384"
},
{
"name": "HTML",
"bytes": "212314"
},
{
"name": "JavaScript",
"bytes": "1433027"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1548783"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
}
|
import logging
from IPython.display import display
import numpy as np
import pandas as pd
logger = logging.getLogger()
def cohort_view_table(df,
                      category_label="category_label",
                      category_order="category_order",
                      flags=None,
                      flag_display_labels=None,
                      add_percentages=True):
    ''' Generate a DataFrame showing counts and percentages
    of subsets (defined by flags), stratified by categories.

    For instance, each row (category) may be a selectivity
    bucket, and each column can be the number of compounds in
    that bucket that passed a given threshold. A "Total"
    column shows the total number of compounds in each
    bucket and a grand total sums them all up.

    @param df: DataFrame where each row is a compound and
    columns are various metrics and flags
    @kwarg category_label: name of the column that defines
    a category. The data is stratified based on this field.
    @kwarg category_order: order in which the categories should
    be displayed as rows of the table. There should be a one
    to one correspondence between category_label and category_order.
    @kwarg flags: list of column names defining binary flags.
    These flags define subsets that will be counted and displayed
    as columns of the output table. Defaults to no flags.
    @kwarg flag_display_labels: string labels for output columns
    corresponding to flags
    @kwarg add_percentages: whether to display percentages
    alongside the counts.
    '''
    # None (not []) as the default avoids the shared-mutable-default trap.
    flags = [] if flags is None else list(flags)
    flag_display_labels = ([] if flag_display_labels is None
                           else list(flag_display_labels))
    assert len(flags) == len(flag_display_labels), '"flags" and "flag_display_labels" should have the same length'
    # Work on a copy so the caller's DataFrame is not mutated by the helper
    # 'Total' column added below.
    df = df.copy()
    df['Total'] = 1
    columns = ['Total'] + flags
    # Count rows (Total) and flag hits per category, keeping categories in
    # the order given by the category_order column.
    df = (
        df
        .groupby([category_order, category_label])[columns]
        .sum()
        .sort_index(axis=0, level=category_order)
        .reset_index(level=[category_order])
        .drop(columns=category_order)
    )
    column_names = ["Total"] + flag_display_labels
    df.columns = column_names
    df.index.names = ['Category']
    # Transpose so categories become columns while summary columns are added.
    df = df.T
    num_categories = len(df.columns)
    logging.getLogger().info("num_categories: {}".format(num_categories))
    # Test compound fields: categories labelled 'Test subset' also get their
    # own subtotal row.
    cpd_fields = [c for c in df.columns if 'Test subset' in c]
    if len(cpd_fields) != 0:
        df['Test Compounds Total'] = df[cpd_fields].sum(axis=1)
    df['Grand Total'] = df.iloc[:, :num_categories].sum(axis=1)
    df = df.T
    df.index.name = None
    if add_percentages:
        df = df.transform(_add_row_percentages, axis=1)
    return df
def _fmt_total_percentages(x, total):
'''
Formatting function for DataFrame.Style. Formats the
"Total" column to show percentages.
'''
s = '''<span style="width:50%;float: left;text-align:right;font-weight:bold">{:,d} </span>
<span style="font-size:1em;color:#FF7043;width:50%;text-align:left;float: right;padding-left:1em;font-weight:bold">
({:.0%})</span>'''.format(int(x), float(x) / total)
return s
def _add_row_percentages(s):
'''Convert all columns except for "Total" to a string
that shows the integer count as well as the percentage
of Total within the row.'''
s = s + 0
index = s.index
assert "Total" in index
total = s['Total']
for label, x in s.iteritems():
if label == "Total":
continue
s[label] = '''<span style="width:50%;float: left;text-align:right">{:,d} </span>
<span style="font-size:1em;color:#888888;width:50%;text-align:left;float: right;padding-left:1em">
({:.0%})</span>'''.format(int(x), float(x) / total)
return s
def display_cohort_stats_table(table, barplot_column):
    """Return a pandas Styler rendering a cohort table as styled HTML.

    @param table: DataFrame produced by cohort_view_table (category rows,
    plus 'Grand Total' and optionally 'Test Compounds Total' summary rows).
    @param barplot_column: column whose per-category values get an inline
    bar plot.
    """
    font_family = "Roboto"
    idx = pd.IndexSlice
    # Indexes of the rows corresponding to categories; excludes the summary
    # "...Total" rows.
    group_ids = [x for x in table.index if 'Total' not in x]
    # Scale bars against the sum of per-category values in barplot_column.
    barplot_max = table.loc[group_ids, barplot_column].sum()
    # Grand total count used as the denominator for 'Total' percentages.
    total = table.loc['Grand Total', 'Total']
    table_stylized = (
        table
        .style
        .format(
            lambda s: _fmt_total_percentages(s, total),
            subset=pd.IndexSlice[:, 'Total']
        )
        .applymap(lambda x : 'text-align:center;')
        .applymap(lambda x: "border-left:solid thin #d65f5f", subset=idx[:, barplot_column])
        .bar(subset=idx[group_ids, barplot_column], color='#FFDACF', vmin=0, vmax=barplot_max)
        .applymap(lambda x: "padding:0.5em 1em 0.5em 1em")
        .applymap(lambda x: "background:#444;color:white;border:solid thin #000;font-weight:bold", subset=idx['Grand Total', :])
        .applymap(lambda x: "border-left:solid thin #ddd", subset=idx[:, 'Total'])
        .set_table_styles(
            [
                {'selector' : 'table',
                 'props' : [('font-family', font_family), ('font-size', '30px'), ('border', 'solid thin #999')]
                },
                {'selector' : 'thead, tbody', 'props' : [
                    ('border', 'solid 1px #ddd'),
                    ]
                },
                {'selector' :
                     'thead', 'props' : [
                        ('border-bottom', 'solid 2px #ddd'),
                        ('border-top', 'solid 2px #ddd'),
                        ('background', '#fefefe'), ('text-align', 'center'),
                        ('font-family', font_family),
                        ('font-size' , '1em')
                    ]
                },
                {'selector' : 'th',
                     'props' : [
                        ('text-align', 'center'),
                        ('color' , '#444'),
                    ]
                },
                {'selector' : 'th.col_heading',
                     'props' : [
                         ('max-width', '8em')
                     ]
                },
                {'selector' : 'th:not(.blank)',
                     'props' : [
                        # ('border-left','solid thin #ddd'),
                        # ('border-right','solid thin #ddd'),
                    ]
                },
                {'selector' : 'tbody', 'props' : [
                    ('text-align', 'center'), ('background', '#fff'), ('font-size' , '1.em'),
                    ('font-family', font_family)]},
                {'selector' : '.row_heading',
                 'props' : [('border-right', 'solid thin #ddd'), ('text-align', 'left')]}
            ]
        )
    )
    # The optional test-compound subtotal row gets a separating border.
    if 'Test Compounds Total' in table.index:
        table_stylized = table_stylized.applymap(lambda x: "border-top:solid thin #aaa", subset=idx['Test Compounds Total', :])
    return table_stylized
|
{
"content_hash": "f4f630e3a4b7c4db0e6d6cf0f0486a19",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 128,
"avg_line_length": 39.25,
"alnum_prop": 0.5486594578580951,
"repo_name": "cmap/cmapPy",
"id": "0b28545c2ddffd08be482201e61a28e69d8fb2a8",
"size": "6753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmapPy/visualization/cohort_view.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "178"
},
{
"name": "Jupyter Notebook",
"bytes": "96474"
},
{
"name": "Python",
"bytes": "492786"
}
],
"symlink_target": ""
}
|
import unittest
import jpyutil
jpyutil.init_jvm(jvm_maxmem='512M')
import jpy
class TestJavaArrays(unittest.TestCase):
    """Tests for the jpy.diag flags API.

    NOTE(review): despite the class name, nothing here touches Java arrays;
    the tests cover the diagnostic-flag constants and the read/write
    behaviour of jpy.diag.flags.
    """
    def test_diag_flags_constants(self):
        # Each F_* constant is a distinct bit so flags can be OR-ed together;
        # F_ALL is the union of every diagnostic bit.
        self.assertIsNotNone(jpy.diag)
        self.assertIsNotNone(jpy.diag.flags)
        self.assertEqual(jpy.diag.F_OFF, 0x00)
        self.assertEqual(jpy.diag.F_TYPE, 0x01)
        self.assertEqual(jpy.diag.F_METH, 0x02)
        self.assertEqual(jpy.diag.F_EXEC, 0x04)
        self.assertEqual(jpy.diag.F_MEM, 0x08)
        self.assertEqual(jpy.diag.F_JVM, 0x10)
        self.assertEqual(jpy.diag.F_ERR, 0x20)
        self.assertEqual(jpy.diag.F_ALL, 0xff)
    def test_diag_flags_value(self):
        # flags starts cleared, accepts direct assignment, and supports
        # incremental accumulation of individual bits via +=.
        self.assertIsNotNone(jpy.diag)
        self.assertEqual(jpy.diag.flags, 0)
        jpy.diag.flags = 1
        self.assertEqual(jpy.diag.flags, 1)
        jpy.diag.flags = 0
        self.assertEqual(jpy.diag.flags, 0)
        jpy.diag.flags = jpy.diag.F_EXEC + jpy.diag.F_MEM
        self.assertEqual(jpy.diag.flags, 12)
        jpy.diag.flags = 0
        self.assertEqual(jpy.diag.flags, 0)
        jpy.diag.flags += jpy.diag.F_EXEC
        jpy.diag.flags += jpy.diag.F_MEM
        self.assertEqual(jpy.diag.flags, 12)
def _main():
    # Announce which test module is running, then defer to unittest's CLI.
    print('\nRunning ' + __file__)
    unittest.main()


if __name__ == '__main__':
    _main()
|
{
"content_hash": "9fb186c3ec3406d17face66952371217",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 57,
"avg_line_length": 30.428571428571427,
"alnum_prop": 0.6322378716744914,
"repo_name": "bcdev/jpy",
"id": "264e6ba5df18e5664df4515569840881f5757198",
"size": "1278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/python/jpy_diag_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2659"
},
{
"name": "C",
"bytes": "337429"
},
{
"name": "C++",
"bytes": "4107"
},
{
"name": "Java",
"bytes": "162373"
},
{
"name": "Python",
"bytes": "1528601"
},
{
"name": "Shell",
"bytes": "2449"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from polymorphic import PolymorphicModel
from django.db.models import F
from django.contrib.admin.models import User
from jenkins import get_job_status
from .alert import send_alert
from .calendar import get_events
from .graphite import parse_metric
from .alert import send_alert
from .tasks import update_service
from datetime import datetime, timedelta
from django.utils import timezone
import json
import re
import time
import requests
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
CHECK_TYPES = (
('>', 'Greater than'),
('>=', 'Greater than or equal'),
('<', 'Less than'),
('<=', 'Less than or equal'),
('==', 'Equal to'),
)
def serialize_recent_results(recent_results):
    """Encode check results as comma-separated 1/-1 flags, oldest first.

    '1' marks a success, '-1' a failure; an empty/None input yields ''.
    """
    if not recent_results:
        return ''
    # Input arrives newest-first; reverse so the string reads oldest-first.
    flags = ['1' if result.succeeded else '-1' for result in recent_results]
    flags.reverse()
    return ','.join(flags)
def calculate_debounced_passing(recent_results, debounce=0):
    """
    `debounce` is the number of previous failures we need (not including this)
    to mark a search as passing or failing

    Returns:
      True if passing given debounce factor
      False if failing
    """
    if not recent_results:
        return True
    # Passing as long as any result inside the debounce window succeeded;
    # the window is the newest `debounce + 1` results.
    window = recent_results[:debounce + 1]
    return any(result.succeeded for result in window)
class Service(models.Model):
    """
    A monitored service: a named collection of status checks plus the
    alerting configuration for the users who care about it.

    `overall_status` holds the worst importance among currently failing
    checks (or PASSING); `old_overall_status` keeps the previous value so
    transitions can be detected.
    """
    # Service-level statuses, derived from failing checks' importance.
    PASSING_STATUS = 'PASSING'
    WARNING_STATUS = 'WARNING'
    ERROR_STATUS = 'ERROR'
    CRITICAL_STATUS = 'CRITICAL'
    # Per-check debounced statuses (see StatusCheck.calculated_status).
    CALCULATED_PASSING_STATUS = 'passing'
    CALCULATED_INTERMITTENT_STATUS = 'intermittent'
    CALCULATED_FAILING_STATUS = 'failing'
    STATUSES = (
        (CALCULATED_PASSING_STATUS, CALCULATED_PASSING_STATUS),
        (CALCULATED_INTERMITTENT_STATUS, CALCULATED_INTERMITTENT_STATUS),
        (CALCULATED_FAILING_STATUS, CALCULATED_FAILING_STATUS),
    )
    IMPORTANCES = (
        (WARNING_STATUS, 'Warning'),
        (ERROR_STATUS, 'Error'),
        (CRITICAL_STATUS, 'Critical'),
    )
    name = models.TextField()
    url = models.TextField(
        blank=True,
        help_text="URL of service."
    )
    users_to_notify = models.ManyToManyField(
        User,
        blank=True,
        help_text='Users who should receive alerts.',
    )
    status_checks = models.ManyToManyField(
        'StatusCheck',
        blank=True,
        help_text='Checks used to calculate service status.',
    )
    # Timestamp of the last alert; used to throttle repeat notifications.
    last_alert_sent = models.DateTimeField(
        null=True,
        blank=True,
    )
    # Per-channel alert toggles.
    email_alert = models.BooleanField(default=False)
    hipchat_alert = models.BooleanField(default=True)
    sms_alert = models.BooleanField(default=False)
    telephone_alert = models.BooleanField(
        default=False,
        help_text='Must be enabled, and check importance set to Critical, to receive telephone alerts.',
    )
    alerts_enabled = models.BooleanField(
        default=True,
        help_text='Alert when this service is not healthy.',
    )
    overall_status = models.TextField(default=PASSING_STATUS)
    old_overall_status = models.TextField(default=PASSING_STATUS)
    hackpad_id = models.TextField(
        null=True,
        blank=True,
        verbose_name='Recovery instructions',
        help_text='Gist, Hackpad or Refheap js embed with recovery instructions e.g. https://you.hackpad.com/some_document.js'
    )
    class Meta:
        ordering = ['name']
    def __unicode__(self):
        return self.name
    def update_status(self):
        """Recalculate overall status, persist a snapshot, and maybe alert.

        Alerts fire unless the status both was and remains PASSING.
        """
        self.old_overall_status = self.overall_status
        # Only active checks feed into our calculation
        status_checks_failed_count = self.all_failing_checks().count()
        self.overall_status = self.most_severe(self.all_failing_checks())
        self.snapshot = ServiceStatusSnapshot(
            service=self,
            num_checks_active=self.active_status_checks().count(),
            num_checks_passing=self.active_status_checks(
            ).count() - status_checks_failed_count,
            num_checks_failing=status_checks_failed_count,
            overall_status=self.overall_status,
            time=timezone.now(),
        )
        self.snapshot.save()
        self.save()
        if not (self.overall_status == Service.PASSING_STATUS and self.old_overall_status == Service.PASSING_STATUS):
            self.alert()
    def most_severe(self, check_list):
        """Return the worst importance in check_list (CRITICAL > ERROR >
        WARNING), or PASSING_STATUS when the list is empty."""
        failures = [c.importance for c in check_list]
        if self.CRITICAL_STATUS in failures:
            return self.CRITICAL_STATUS
        if self.ERROR_STATUS in failures:
            return self.ERROR_STATUS
        if self.WARNING_STATUS in failures:
            return self.WARNING_STATUS
        return self.PASSING_STATUS
    @property
    def is_critical(self):
        """
        Break out separately because it's a bit of a pain to
        get wrong.

        True only on a transition *into* CRITICAL (old status was anything
        else).
        """
        if self.old_overall_status != self.CRITICAL_STATUS and self.overall_status == self.CRITICAL_STATUS:
            return True
        return False
    def alert(self):
        """Send an alert for the current status, throttled per severity."""
        if not self.alerts_enabled:
            return
        if self.overall_status != self.PASSING_STATUS:
            # Don't alert every time: warnings are throttled by
            # NOTIFICATION_INTERVAL, errors/criticals by ALERT_INTERVAL.
            if self.overall_status == self.WARNING_STATUS:
                if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.NOTIFICATION_INTERVAL)) < self.last_alert_sent:
                    return
            elif self.overall_status in (self.CRITICAL_STATUS, self.ERROR_STATUS):
                if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.ALERT_INTERVAL)) < self.last_alert_sent:
                    return
            self.last_alert_sent = timezone.now()
        else:
            # We don't count "back to normal" as an alert
            self.last_alert_sent = None
        self.save()
        self.snapshot.did_send_alert = True
        self.snapshot.save()
        # send_alert handles the logic of how exactly alerts should be handled
        send_alert(self, duty_officers=get_duty_officers())
    @property
    def recent_snapshots(self):
        """Snapshots from the last 24 hours, with `time` converted to a Unix
        timestamp (e.g. for JSON serialisation/plotting)."""
        snapshots = self.snapshots.filter(
            time__gt=(timezone.now() - timedelta(minutes=60 * 24)))
        snapshots = list(snapshots.values())
        for s in snapshots:
            s['time'] = time.mktime(s['time'].timetuple())
        return snapshots
    # Queryset helpers over the linked status checks.
    def active_status_checks(self):
        return self.status_checks.filter(active=True)
    def inactive_status_checks(self):
        return self.status_checks.filter(active=False)
    def all_passing_checks(self):
        return self.active_status_checks().filter(calculated_status=self.CALCULATED_PASSING_STATUS)
    def all_failing_checks(self):
        # Anything not 'passing' (i.e. intermittent or failing) counts.
        return self.active_status_checks().exclude(calculated_status=self.CALCULATED_PASSING_STATUS)
    # Filters by concrete check subtype (django-polymorphic ctype model).
    def graphite_status_checks(self):
        return self.status_checks.filter(polymorphic_ctype__model='graphitestatuscheck')
    def http_status_checks(self):
        return self.status_checks.filter(polymorphic_ctype__model='httpstatuscheck')
    def jenkins_status_checks(self):
        return self.status_checks.filter(polymorphic_ctype__model='jenkinsstatuscheck')
    def active_graphite_status_checks(self):
        return self.graphite_status_checks().filter(active=True)
    def active_http_status_checks(self):
        return self.http_status_checks().filter(active=True)
    def active_jenkins_status_checks(self):
        return self.jenkins_status_checks().filter(active=True)
class ServiceStatusSnapshot(models.Model):
    """Point-in-time record of a Service's check counts and overall status."""
    service = models.ForeignKey(Service, related_name='snapshots')
    time = models.DateTimeField(db_index=True)
    num_checks_active = models.IntegerField(default=0)
    num_checks_passing = models.IntegerField(default=0)
    num_checks_failing = models.IntegerField(default=0)
    overall_status = models.TextField(default=Service.PASSING_STATUS)
    # NOTE(review): used as a boolean (Service.alert sets it to True);
    # IntegerField(default=False) looks like it was meant to be a
    # BooleanField — changing it would require a schema migration.
    did_send_alert = models.IntegerField(default=False)
    def __unicode__(self):
        return u"%s: %s" % (self.service.name, self.overall_status)
class StatusCheck(PolymorphicModel):
    """
    Base class for polymorphic models. We're going to use
    proxy models for inheriting because it makes life much simpler,
    but this allows us to stick different methods etc on subclasses.

    You can work out what (sub)class a model is an instance of by accessing
    `instance.polymorphic_ctype.model`

    We are using django-polymorphic for polymorphism

    All check-type-specific fields (Graphite / HTTP / Jenkins) live on this
    one model; proxy subclasses only use the subset relevant to them.
    """
    # Common attributes to all
    name = models.TextField()
    active = models.BooleanField(
        default=True,
        help_text='If not active, check will not be used to calculate service status and will not trigger alerts.',
    )
    importance = models.CharField(
        max_length=30,
        choices=Service.IMPORTANCES,
        default=Service.ERROR_STATUS,
        help_text='Severity level of a failure. Critical alerts are for failures you want to wake you up at 2am, Errors are things you can sleep through but need to fix in the morning, and warnings for less important things.'
    )
    frequency = models.IntegerField(
        default=5,
        help_text='Minutes between each check.',
    )
    debounce = models.IntegerField(
        default=0,
        null=True,
        help_text='Number of successive failures permitted before check will be marked as failed. Default is 0, i.e. fail on first failure.'
    )
    created_by = models.ForeignKey(User)
    # Debounced pass/fail state, recomputed on every save().
    calculated_status = models.CharField(
        max_length=50, choices=Service.STATUSES, default=Service.CALCULATED_PASSING_STATUS, blank=True)
    last_run = models.DateTimeField(null=True)
    # Serialized recent-results string (see serialize_recent_results).
    cached_health = models.TextField(editable=False, null=True)
    # Graphite checks
    metric = models.TextField(
        null=True,
        help_text='fully.qualified.name of the Graphite metric you want to watch. This can be any valid Graphite expression, including wildcards, multiple hosts, etc.',
    )
    check_type = models.CharField(
        choices=CHECK_TYPES,
        max_length=100,
        null=True,
    )
    value = models.TextField(
        null=True,
        help_text='If this expression evaluates to true, the check will fail (possibly triggering an alert).',
    )
    expected_num_hosts = models.IntegerField(
        default=0,
        null=True,
        help_text='The minimum number of data series (hosts) you expect to see.',
    )
    # HTTP checks
    endpoint = models.TextField(
        null=True,
        help_text='HTTP(S) endpoint to poll.',
    )
    username = models.TextField(
        blank=True,
        null=True,
        help_text='Basic auth username.',
    )
    password = models.TextField(
        blank=True,
        null=True,
        help_text='Basic auth password.',
    )
    text_match = models.TextField(
        blank=True,
        null=True,
        help_text='Regex to match against source of page.',
    )
    status_code = models.TextField(
        default=200,
        null=True,
        help_text='Status code expected from endpoint.'
    )
    timeout = models.IntegerField(
        default=30,
        null=True,
        help_text='Time out after this many seconds.',
    )
    verify_ssl_certificate = models.BooleanField(
        default=True,
        help_text='Set to false to allow not try to verify ssl certificates (default True)',
    )
    # Jenkins checks
    max_queued_build_time = models.IntegerField(
        null=True,
        blank=True,
        help_text='Alert if build queued for more than this many minutes.',
    )
    class Meta(PolymorphicModel.Meta):
        ordering = ['name']
    def __unicode__(self):
        return self.name
    def recent_results(self):
        """Return the 10 most recent results, newest first."""
        return self.statuscheckresult_set.all().order_by('-time_complete')[:10]
    def last_result(self):
        """Return the most recent result, or None when there are none.

        NOTE(review): the bare except also swallows unrelated errors, not
        just the empty-queryset IndexError.
        """
        try:
            return self.recent_results()[0]
        except:
            return None
    def run(self):
        """Execute the check once, recording timing and persisting a result.

        Exceptions raised by _run() are captured into a failed result rather
        than propagated.
        """
        start = timezone.now()
        try:
            result = self._run()
        except Exception as e:
            result = StatusCheckResult(check=self)
            result.error = u'Error in performing check: %s' % (e,)
            result.succeeded = False
        finish = timezone.now()
        result.time = start
        result.time_complete = finish
        result.save()
        self.last_run = finish
        self.save()
    def _run(self):
        """
        Implement on subclasses. Should return a `CheckResult` instance.
        """
        raise NotImplementedError('Subclasses should implement')
    def save(self, *args, **kwargs):
        # Recompute the debounced pass/fail state and cached health string
        # from recent results before persisting.
        recent_results = self.recent_results()
        if calculate_debounced_passing(recent_results, self.debounce):
            self.calculated_status = Service.CALCULATED_PASSING_STATUS
        else:
            self.calculated_status = Service.CALCULATED_FAILING_STATUS
        self.cached_health = serialize_recent_results(recent_results)
        ret = super(StatusCheck, self).save(*args, **kwargs)
        # Update linked services
        self.update_related_services()
        return ret
    def update_related_services(self):
        # Kick off async status recalculation for every service using this
        # check (celery task).
        services = self.service_set.all()
        for service in services:
            update_service.delay(service.id)
class GraphiteStatusCheck(StatusCheck):
    """Check that compares a Graphite metric against a threshold."""
    class Meta(StatusCheck.Meta):
        proxy = True
    @property
    def check_category(self):
        return "Metric check"
    def format_error_message(self, failure_value, actual_hosts):
        """
        A summary of why the check is failing for inclusion in hipchat, sms etc

        Returns something like:
        "5.0 > 4 | 1/2 hosts"
        """
        hosts_string = u''
        if self.expected_num_hosts > 0:
            hosts_string = u' | %s/%s hosts' % (actual_hosts,
                                                self.expected_num_hosts)
        if self.expected_num_hosts > actual_hosts:
            return u'Hosts missing%s' % hosts_string
        if failure_value is None:
            return "Failed to get metric from Graphite"
        return u"%0.1f %s %0.1f%s" % (
            failure_value,
            self.check_type,
            float(self.value),
            hosts_string
        )
    def _run(self):
        """Fetch the metric and evaluate the configured comparison.

        `failed` starts as True on a fetch error, None when nothing has been
        evaluated yet; a successful comparison below can overwrite it.
        """
        series = parse_metric(self.metric, mins_to_check=self.frequency)
        failure_value = None
        if series['error']:
            failed = True
        else:
            failed = None
        result = StatusCheckResult(
            check=self,
        )
        if series['num_series_with_data'] > 0:
            result.average_value = series['average_value']
            # For '<'/'<=' the series minimum is compared; for '>'/'>=' the
            # maximum; '==' fails when the value appears anywhere.
            if self.check_type == '<':
                failed = float(series['min']) < float(self.value)
                if failed:
                    failure_value = series['min']
            elif self.check_type == '<=':
                failed = float(series['min']) <= float(self.value)
                if failed:
                    failure_value = series['min']
            elif self.check_type == '>':
                failed = float(series['max']) > float(self.value)
                if failed:
                    failure_value = series['max']
            elif self.check_type == '>=':
                failed = float(series['max']) >= float(self.value)
                if failed:
                    failure_value = series['max']
            elif self.check_type == '==':
                failed = float(self.value) in series['all_values']
                if failed:
                    failure_value = float(self.value)
            else:
                raise Exception(u'Check type %s not supported' %
                                self.check_type)
        # Too few reporting hosts is always a failure, regardless of values.
        if series['num_series_with_data'] < self.expected_num_hosts:
            failed = True
        try:
            result.raw_data = json.dumps(series['raw'])
        except:
            # Fall back to the raw object if it is not JSON-serialisable.
            result.raw_data = series['raw']
        # Note: `not None` is True, so "no data, no expectation" passes.
        result.succeeded = not failed
        if not result.succeeded:
            result.error = self.format_error_message(
                failure_value,
                series['num_series_with_data'],
            )
        result.actual_hosts = series['num_series_with_data']
        result.failure_value = failure_value
        return result
class HttpStatusCheck(StatusCheck):
    """Check that polls an HTTP(S) endpoint for status code / body content."""
    class Meta(StatusCheck.Meta):
        proxy = True
    @property
    def check_category(self):
        return "HTTP check"
    def _run(self):
        """Poll the endpoint; fail on request errors, an unexpected status
        code, or (if configured) a missing body regex match."""
        result = StatusCheckResult(check=self)
        auth = (self.username, self.password)
        try:
            # Only attach basic auth when credentials were configured.
            if self.username or self.password:
                resp = requests.get(
                    self.endpoint,
                    timeout=self.timeout,
                    verify=self.verify_ssl_certificate,
                    auth=auth
                )
            else:
                resp = requests.get(
                    self.endpoint,
                    timeout=self.timeout,
                    verify=self.verify_ssl_certificate,
                )
        except requests.RequestException as e:
            result.error = u'Request error occurred: %s' % (e,)
            result.succeeded = False
        else:
            # Status-code mismatch takes precedence over the body check.
            if self.status_code and resp.status_code != int(self.status_code):
                result.error = u'Wrong code: got %s (expected %s)' % (
                    resp.status_code, int(self.status_code))
                result.succeeded = False
                result.raw_data = resp.content
            elif self.text_match:
                if not re.search(self.text_match, resp.content):
                    result.error = u'Failed to find match regex /%s/ in response body' % self.text_match
                    result.raw_data = resp.content
                    result.succeeded = False
                else:
                    result.succeeded = True
            else:
                result.succeeded = True
        return result
class JenkinsStatusCheck(StatusCheck):
    """Check mirroring the state of a Jenkins job of the same name."""
    class Meta(StatusCheck.Meta):
        proxy = True
    @property
    def check_category(self):
        return "Jenkins check"
    @property
    def failing_short_status(self):
        return 'Job failing on Jenkins'
    def _run(self):
        """Fail when the job is missing, disabled, failing, or blocked in
        the queue longer than max_queued_build_time minutes."""
        result = StatusCheckResult(check=self)
        try:
            status = get_job_status(self.name)
            active = status['active']
            if status['status_code'] == 404:
                result.error = u'Job %s not found on Jenkins' % self.name
                result.succeeded = False
                return result
            elif status['status_code'] > 400:
                # Will fall through to next block
                raise Exception(u'returned %s' % status['status_code'])
        except Exception as e:
            # If something else goes wrong, we will *not* fail - otherwise
            # a lot of services seem to fail all at once.
            # Ugly to do it here but...
            result.error = u'Error fetching from Jenkins - %s' % e
            result.succeeded = True
            return result
        if not active:
            # We will fail if the job has been disabled
            result.error = u'Job "%s" disabled on Jenkins' % self.name
            result.succeeded = False
        else:
            # A build blocked in the queue past the threshold fails the
            # check even if the job itself last succeeded.
            if self.max_queued_build_time and status['blocked_build_time']:
                if status['blocked_build_time'] > self.max_queued_build_time * 60:
                    result.succeeded = False
                    result.error = u'Job "%s" has blocked build waiting for %ss (> %sm)' % (
                        self.name,
                        int(status['blocked_build_time']),
                        self.max_queued_build_time,
                    )
                else:
                    result.succeeded = status['succeeded']
            else:
                result.succeeded = status['succeeded']
            if not status['succeeded']:
                # Append to any queue-related error already recorded.
                if result.error:
                    result.error += u'; Job "%s" failing on Jenkins' % self.name
                else:
                    result.error = u'Job "%s" failing on Jenkins' % self.name
            result.raw_data = status
        return result
class StatusCheckResult(models.Model):
    """
    We use the same StatusCheckResult model for all check types,
    because really they are not so very different.

    Checks don't have to use all the fields, so most should be
    nullable
    """
    check = models.ForeignKey(StatusCheck)
    time = models.DateTimeField(null=False)
    time_complete = models.DateTimeField(null=True, db_index=True)
    raw_data = models.TextField(null=True)
    succeeded = models.BooleanField(default=False)
    error = models.TextField(null=True)

    def __unicode__(self):
        return '%s: %s @%s' % (self.status, self.check.name, self.time)

    @property
    def status(self):
        """'succeeded' or 'failed', for display."""
        if self.succeeded:
            return 'succeeded'
        else:
            return 'failed'

    @property
    def took(self):
        """Duration of the check in milliseconds, or None if not finished.

        Uses total_seconds() so runs longer than one second are not
        truncated (timedelta.microseconds only holds the sub-second part).
        """
        try:
            return (self.time_complete - self.time).total_seconds() * 1000
        except TypeError:
            # time_complete is None while the check is still running.
            return None

    @property
    def short_error(self):
        """A <=30-char version of `error` for compact displays; None-safe."""
        snippet_len = 30
        if self.error is None:
            return self.error
        if len(self.error) > snippet_len:
            return u"%s..." % self.error[:snippet_len - 3]
        else:
            return self.error
class UserProfile(models.Model):
    """Per-user alerting preferences (phone, hipchat alias, fallback flag)."""
    user = models.OneToOneField(User, related_name='profile')
    # Stored without the leading '+' (stripped in save()).
    mobile_number = models.CharField(max_length=20, blank=True, default='')
    hipchat_alias = models.CharField(max_length=50, blank=True, default='')
    # At most one profile may be the fallback alert recipient.
    fallback_alert_user = models.BooleanField(default=False)
    def __unicode__(self):
        return 'User profile: %s' % self.user.username
    def save(self, *args, **kwargs):
        # Normalise the number: drop any leading '+'.
        if self.mobile_number.startswith('+'):
            self.mobile_number = self.mobile_number[1:]
        # Enforce uniqueness: becoming the fallback clears the flag on every
        # other profile.
        if self.fallback_alert_user:
            profiles = UserProfile.objects.exclude(id=self.id)
            profiles.update(fallback_alert_user=False)
        return super(UserProfile, self).save(*args, **kwargs)
    @property
    def prefixed_mobile_number(self):
        # E.164-style display form with the '+' re-added.
        return '+%s' % self.mobile_number
class Shift(models.Model):
    """An on-call duty window for a user, synced from the duty calendar.

    `uid` is the calendar event id; `deleted` soft-deletes shifts whose
    event disappeared (see update_shifts).
    """
    start = models.DateTimeField()
    end = models.DateTimeField()
    user = models.ForeignKey(User)
    uid = models.TextField()
    deleted = models.BooleanField(default=False)
    def __unicode__(self):
        deleted = ''
        if self.deleted:
            deleted = ' (deleted)'
        return "%s: %s to %s%s" % (self.user.username, self.start, self.end, deleted)
def get_duty_officers(at_time=None):
    """Returns a list of duty officers for a given time or now if none given.

    Falls back to the single profile flagged fallback_alert_user when no
    shift covers `at_time`, and to an empty list when there is none.
    """
    duty_officers = []
    if not at_time:
        at_time = timezone.now()
    # Shifts (not soft-deleted) whose window strictly contains at_time.
    current_shifts = Shift.objects.filter(
        deleted=False,
        start__lt=at_time,
        end__gt=at_time,
    )
    if current_shifts:
        duty_officers = [shift.user for shift in current_shifts]
        return duty_officers
    else:
        try:
            u = UserProfile.objects.get(fallback_alert_user=True)
            return [u.user]
        except UserProfile.DoesNotExist:
            return []
def update_shifts():
    """Sync Shift rows from the duty calendar.

    All future shifts are first soft-deleted; each calendar event whose
    summary matches an active username (case-insensitive) then re-creates
    or resurrects its shift, keyed by the event uid.
    """
    events = get_events()
    users = User.objects.filter(is_active=True)
    # Map lowercase usernames to users for event-summary matching.
    user_lookup = {}
    for u in users:
        user_lookup[u.username.lower()] = u
    # Mark all future shifts deleted; events still present will undelete
    # their shift below.
    future_shifts = Shift.objects.filter(start__gt=timezone.now())
    future_shifts.update(deleted=True)
    for event in events:
        e = event['summary'].lower().strip()
        if e in user_lookup:
            user = user_lookup[e]
            # Upsert by calendar event uid.
            try:
                s = Shift.objects.get(uid=event['uid'])
            except Shift.DoesNotExist:
                s = Shift(uid=event['uid'])
            s.start = event['start']
            s.end = event['end']
            s.user = user
            s.deleted = False
            s.save()
|
{
"content_hash": "b9b725efa803f27c7ecd5ccf4f7a87a1",
"timestamp": "",
"source": "github",
"line_count": 714,
"max_line_length": 225,
"avg_line_length": 33.84453781512605,
"alnum_prop": 0.5980964204427892,
"repo_name": "JensRantil/cabot",
"id": "c86e7dbfc41b2454337ae3d1b46c226516dd7062",
"size": "24165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/cabotapp/models.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from boxsdk.exception import BoxAPIException
import csv
import os
from box_manage_users.scripts.script import Script
class MassProvisionScript(Script):
    """
    Script to create many users in an enterprise at once.

    Reads inputs/input_users.csv, where cells are consumed pairwise as
    (name, email) — multiple pairs per row are allowed.
    """
    _title = 'Provision Users & Create Personal Folders'
    _message = 'Ensure that the csv file is in the inputs folder with the name input_users.csv'
    def run(self):
        """
        Base class override.
        Open the input_users.csv and create a user for each name/email in the file.
        """
        # NOTE(review): opening in 'rb' for csv.reader is Python 2 style;
        # under Python 3 this should be mode 'r' with newline='' — confirm
        # the target interpreter before changing.
        with open(os.path.join('inputs', 'input_users.csv'), 'rb') as f:
            reader = csv.reader(f)
            for row in reader:
                # Consume cells pairwise: name, then its email.
                iterrow = iter(row)
                for item in iterrow:
                    name = item
                    email = next(iterrow)
                    self.create_user_and_folder(email, name, 'co-owner')
        super(MassProvisionScript, self).run()
    def create_user_and_folder(self, email, name, access='editor'):
        """
        Creates a new user and their own personal folder.

        The folder is created by the admin, the new user is added as a
        collaborator with `access`, promoted to owner, and the admin's own
        collaboration is then removed.
        """
        # Log which user script is provisioning in:
        self._overview_logger.info('\n\nEmail: %s - Name: %s', email, name)
        #Create new enterprise user
        try:
            new_person = self._client.create_new_user(email, name)
        except BoxAPIException as ex:
            # Log and skip this user; keep provisioning the rest.
            self._fail_logger.warning('Could not create user {} ({}) - {}'.format(name, email, ex))
            return
        new_person_id = new_person.id
        #Create own personal folder (under the root folder '0')
        new_person_folder = self._client.create_new_folder(name, '0')
        new_folder_id = new_person_folder.id
        #Add new user as collaborator
        collab = self._client.add_collab(new_folder_id, new_person_id, access)
        new_collab_id = collab.id
        #Update new collab to Owner
        self._client.update_collab(new_collab_id, "owner")
        #Grab new collab_id for the admin
        my_collab = self._client.get_all_collabs(new_folder_id)
        assert len(my_collab) == 1
        my_collab_id = my_collab[0].id
        #Removed admin collab_id so only the new user owns the folder
        self._client.delete_collab(my_collab_id)
        self._logger.info('Success!\n')
def main():
    """Construct the provisioning script and execute it."""
    script = MassProvisionScript()
    script.run()


if __name__ == '__main__':
    main()
|
{
"content_hash": "9185290340b394a3148f56b0a7ca5be6",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 99,
"avg_line_length": 32.42465753424658,
"alnum_prop": 0.596958174904943,
"repo_name": "box-samples/user-management",
"id": "9a21d5e861ac606de89fe5f0ae4c3681fdb83615",
"size": "2384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "box_manage_users/scripts/provision.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21173"
}
],
"symlink_target": ""
}
|
''' API handlers package: re-exports the metrics and topology handlers. '''
from .metrics import (
MetricsHandler,
MetricsTimelineHandler
)
from .topology import (
TopologyExceptionSummaryHandler,
ListTopologiesJsonHandler,
TopologyLogicalPlanJsonHandler,
TopologyPhysicalPlanJsonHandler,
TopologySchedulerLocationJsonHandler,
TopologyExecutionStateJsonHandler,
TopologyExceptionsJsonHandler,
PidHandler,
JstackHandler,
MemoryHistogramHandler,
JmapHandler
)
|
{
"content_hash": "bdc946972ef09b9ed5bc337e22544774",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 41,
"avg_line_length": 23.894736842105264,
"alnum_prop": 0.7731277533039648,
"repo_name": "mycFelix/heron",
"id": "c9e17e0f041296acd2714e534eb286fe81aa167f",
"size": "1239",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "heron/tools/ui/src/python/handlers/api/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14063"
},
{
"name": "C++",
"bytes": "1723731"
},
{
"name": "CSS",
"bytes": "77708"
},
{
"name": "HCL",
"bytes": "5314"
},
{
"name": "HTML",
"bytes": "39432"
},
{
"name": "Java",
"bytes": "4888188"
},
{
"name": "JavaScript",
"bytes": "1107904"
},
{
"name": "M4",
"bytes": "18741"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Objective-C",
"bytes": "2143"
},
{
"name": "Perl",
"bytes": "9298"
},
{
"name": "Python",
"bytes": "1696662"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "130046"
},
{
"name": "Shell",
"bytes": "207639"
},
{
"name": "Smarty",
"bytes": "528"
}
],
"symlink_target": ""
}
|
import argparse
import os
import unicodedata
import fontforge
class MergeType:
    """Internal representation of the requested Latin-merge strategy.

    NONE:  don't merge the Latin font at all,
    PLAIN: use fontforge.mergeFonts(), or
    DEEP:  look inside the Latin font and cherry-pick the useful stuff.
    """
    NONE, PLAIN, DEEP = range(3)

    @classmethod
    def fromstring(cls, name):
        """Map a case-insensitive string ('none'/'plain'/'deep') to a
        MergeType value; unknown strings fall back to NONE.

        The parameter was renamed from ``str`` to stop shadowing the
        builtin; it is only ever passed positionally (argparse ``type=``).
        """
        return getattr(cls, name.upper(), MergeType.NONE)
def mergeLatinFont(orig_font, args):
    """Merge Latin glyphs from 'args.latin_file' into 'orig_font'.

    The strategy comes from 'args.merge_type'; MergeType.NONE leaves the
    font untouched. The modified font is returned either way.
    """
    if args.merge_type == MergeType.PLAIN:
        # Let FontForge pull in everything from the Latin font.
        orig_font.mergeFonts(args.latin_file)
        return orig_font
    if args.merge_type != MergeType.DEEP:
        return orig_font
    # DEEP merge: cherry-pick just a-zA-Z (could be extended to more ranges).
    latin_font = fontforge.open(args.latin_file)
    latin_font.selection.select(("ranges",), "a", "z")
    latin_font.selection.select(("ranges", "more"), "A", "Z")
    latin_font.copy()
    orig_font.selection.select(("ranges",), "a", "z")
    orig_font.selection.select(("ranges", "more"), "A", "Z")
    orig_font.paste()
    return orig_font
def changeLatinDigits(font, args):
    """Create glyph slots for the ASCII digits 0-9 and apply the digit
    feature file, if one was supplied on the command line."""
    if not args.digits_feature_file:
        return
    names = ('zero', 'one', 'two', 'three', 'four',
             'five', 'six', 'seven', 'eight', 'nine')
    for offset, glyph_name in enumerate(names):
        # 0x30 is the code point of ASCII '0'.
        font.createChar(0x30 + offset, glyph_name)
    font.mergeFeature(args.digits_feature_file)
def build(args):
    """Open the Arabic base font and return it with Latin glyphs merged
    in and digit features applied, per the command-line options."""
    result = fontforge.open(args.arabic_file)
    result.encoding = 'unicode'
    mergeLatinFont(result, args)
    changeLatinDigits(result, args)
    return result
def make_dir_p(directory):
    """Create 'directory' (including parents) if it does not already exist.

    Also tolerates an empty path: os.path.dirname() returns '' for a bare
    filename, and os.makedirs('') would raise — so that case is a no-op.
    """
    if directory and not os.path.isdir(directory):
        os.makedirs(directory)
def main():
    """Parse command-line options, build the font and write it out."""
    parser = argparse.ArgumentParser(description="Build Sahel fonts.")
    parser.add_argument("--arabic-file", metavar="FILE",
                        help="input arabic font to process", required=True)
    parser.add_argument("--latin-file", metavar="FILE",
                        help="input latin font to process")
    parser.add_argument("--out-file", metavar="FILE",
                        help="output font to write", required=True)
    parser.add_argument("--feature-file", metavar="FILE",
                        help="input features to use")
    parser.add_argument("--digits-feature-file", metavar="FILE",
                        help="input features to use for digits")
    # Help-string typo fixed: "to be use" -> "to be used".
    parser.add_argument("--merge-type", type=MergeType.fromstring,
                        help="whether 'fontforge.mergeFonts' is to be used")
    args = parser.parse_args()

    font = build(args)
    # Make sure the output directory exists before generating the font.
    make_dir_p(os.path.dirname(args.out_file))
    font.generate(args.out_file)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
{
"content_hash": "ee59d3f3be9ab5c60f46ebca91b52775",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 118,
"avg_line_length": 35.64,
"alnum_prop": 0.6603067714178825,
"repo_name": "bateni/qalam-tarash",
"id": "847f2409ff6fdf14caf6409a609bd1a9446a9f79",
"size": "2714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3729"
},
{
"name": "TeX",
"bytes": "1440"
}
],
"symlink_target": ""
}
|
"""\
Word count with stop words (i.e., words that should be ignored).
"""
# DOCS_INCLUDE_START
STOP_WORDS_FN = 'stop_words.txt'
# Load the stop-word list once at import time; if the file is missing or
# unreadable, fall back to an empty set so the job still runs.
try:
    with open(STOP_WORDS_FN) as f:
        STOP_WORDS = frozenset(l.strip() for l in f if not l.isspace())
except OSError:
    STOP_WORDS = frozenset()
def mapper(_, value, writer):
    """Emit (word, 1) for every non-stop word on the input line; stop
    words are tallied under the STOP_WORDS counter instead."""
    for token in value.split():
        if token in STOP_WORDS:
            writer.count("STOP_WORDS", 1)
        else:
            writer.emit(token, 1)
def reducer(word, icounts, writer):
    """Emit the total occurrence count for a word."""
    total = sum(icounts)
    writer.emit(word, total)
|
{
"content_hash": "46578519fc8daf1c6e2f4447e355ad7a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 71,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6139705882352942,
"repo_name": "crs4/pydoop",
"id": "3eaa9eaa3c936c262db7364f4413b9d7ad7e7828",
"size": "1155",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/pydoop_script/scripts/wordcount_sw.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "202110"
},
{
"name": "C++",
"bytes": "101371"
},
{
"name": "Dockerfile",
"bytes": "9590"
},
{
"name": "Emacs Lisp",
"bytes": "153"
},
{
"name": "Java",
"bytes": "177920"
},
{
"name": "Python",
"bytes": "400609"
},
{
"name": "Shell",
"bytes": "29222"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
int_or_none,
parse_duration,
unified_strdate,
)
class VideoLecturesNetIE(InfoExtractor):
    """Extractor for lectures hosted on videolectures.net.

    All metadata and format information are read from the site's SMIL
    manifest at /<id>/video/1/smil.xml.
    """
    _VALID_URL = r'http://(?:www\.)?videolectures\.net/(?P<id>[^/#?]+)/'
    IE_NAME = 'videolectures.net'

    _TEST = {
        'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/',
        'info_dict': {
            'id': 'promogram_igor_mekjavic_eng',
            'ext': 'mp4',
            'title': 'Automatics, robotics and biocybernetics',
            'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
            'upload_date': '20130627',
            'duration': 565,
            'thumbnail': 're:http://.*\.jpg',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        smil_url = 'http://videolectures.net/%s/video/1/smil.xml' % video_id
        smil = self._download_xml(smil_url, video_id)

        title = find_xpath_attr(smil, './/meta', 'name', 'title').attrib['content']
        description_el = find_xpath_attr(smil, './/meta', 'name', 'abstract')
        description = (
            None if description_el is None
            else description_el.attrib['content'])
        upload_date = unified_strdate(
            find_xpath_attr(smil, './/meta', 'name', 'date').attrib['content'])

        switch = smil.find('.//switch')
        duration = parse_duration(switch.attrib.get('dur'))
        thumbnail_el = find_xpath_attr(switch, './image', 'type', 'thumbnail')
        thumbnail = (
            None if thumbnail_el is None else thumbnail_el.attrib.get('src'))

        formats = []
        for v in switch.findall('./video'):
            if v.attrib.get('proto') != 'http':
                continue
            # systemBitrate may be missing; int_or_none then returns None and
            # the old unconditional `/ 1000.0` raised TypeError. Guard it.
            bitrate = int_or_none(v.attrib.get('systemBitrate'))
            formats.append({
                'url': v.attrib['src'],
                'width': int_or_none(v.attrib.get('width')),
                'height': int_or_none(v.attrib.get('height')),
                'filesize': int_or_none(v.attrib.get('size')),
                'tbr': bitrate / 1000.0 if bitrate is not None else None,
                'ext': v.attrib.get('ext'),
            })

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'upload_date': upload_date,
            'duration': duration,
            'thumbnail': thumbnail,
            'formats': formats,
        }
|
{
"content_hash": "9cd1639bd21c0150d507fcb5c638af64",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 83,
"avg_line_length": 34.55714285714286,
"alnum_prop": 0.5431996692848284,
"repo_name": "Celthi/youtube-dl-GUI",
"id": "ebd2a3dca3ac0e7bd812226c80c356a19b3677ab",
"size": "2419",
"binary": false,
"copies": "40",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/videolecturesnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "2064276"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages

# Read the long description with a context manager so the file handle is
# closed promptly (the old bare open().read() leaked it).
with open('README.rst') as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name='django-waffle',
    version='2.2.0',
    description='A feature flipper for Django.',
    long_description=LONG_DESCRIPTION,
    author='James Socol',
    author_email='me@jamessocol.com',
    url='http://github.com/django-waffle/django-waffle',
    license='BSD',
    packages=find_packages(exclude=['test_app', 'test_settings']),
    include_package_data=True,
    package_data={'': ['README.rst']},
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        # 'Framework :: Django' was listed twice; deduplicated here.
        'Framework :: Django',
        'Framework :: Django :: 2.2',
        'Framework :: Django :: 3.0',
        'Framework :: Django :: 3.1',
        'Framework :: Django :: 3.2',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
{
"content_hash": "457c49b735f98d72410ccd4bbb6c152f",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 37.351351351351354,
"alnum_prop": 0.5875542691751086,
"repo_name": "willkg/django-waffle",
"id": "489e7635b6e3fec9cb4f810339b0b5b9b9c7fe24",
"size": "1382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "911"
},
{
"name": "Python",
"bytes": "94230"
}
],
"symlink_target": ""
}
|
from .stage01_isotopomer_spectrumAccuracy_postgresql_models import *
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
class stage01_isotopomer_spectrumAccuracy_query(sbaas_template_query):
    """Query helper for the stage01 isotopomer spectrum-accuracy tables."""

    def initialize_supportedTables(self):
        """Register the table classes this query object supports."""
        supported = {
            'data_stage01_isotopomer_spectrumAccuracy':
                data_stage01_isotopomer_spectrumAccuracy,
            'data_stage01_isotopomer_spectrumAccuracyNormSum':
                data_stage01_isotopomer_spectrumAccuracyNormSum,
        }
        self.set_supportedTables(supported)

    def initialize_dataStage01_isotopomer_spectrumAccuracy(self):
        """Create the spectrum-accuracy tables if they do not already exist."""
        try:
            data_stage01_isotopomer_spectrumAccuracy.__table__.create(
                self.engine, True)
            data_stage01_isotopomer_spectrumAccuracyNormSum.__table__.create(
                self.engine, True)
        except SQLAlchemyError as e:
            print(e)

    def drop_dataStage01_isotopomer_spectrumAccuracy(self):
        """Drop the spectrum-accuracy tables if they exist."""
        try:
            data_stage01_isotopomer_spectrumAccuracy.__table__.drop(
                self.engine, True)
            data_stage01_isotopomer_spectrumAccuracyNormSum.__table__.drop(
                self.engine, True)
        except SQLAlchemyError as e:
            print(e)
|
{
"content_hash": "1797ff32e744931ef4a439bd31c009c1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 112,
"avg_line_length": 54.064516129032256,
"alnum_prop": 0.7356801909307876,
"repo_name": "dmccloskey/SBaaS_isotopomer",
"id": "f6c27418c64c814caafe06a3f940f065a9c77179",
"size": "1676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SBaaS_isotopomer/stage01_isotopomer_spectrumAccuracy_query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "544550"
}
],
"symlink_target": ""
}
|
import unittest
from conans.util.files import load
from conans.test.utils.tools import TestClient
class ConfigTest(unittest.TestCase):
    """Tests for the `conan config` command: get, set and rm."""
    def setUp(self):
        # Fresh TestClient (isolated cache/conan.conf) for every test.
        self.client = TestClient()
    def basic_test(self):
        """`config get` with no argument dumps the whole conan.conf."""
        # show the full file
        self.client.run("config get")
        self.assertIn("default_profile = default", self.client.user_io.out)
        self.assertIn("path = ~/.conan/data", self.client.user_io.out)
    def storage_test(self):
        """`config get` can show one section or a single section key."""
        # show the full file
        self.client.run("config get storage")
        self.assertIn("path = ~/.conan/data", self.client.user_io.out)
        self.client.run("config get storage.path")
        self.assertIn("~/.conan/data", self.client.user_io.out)
        # A bare value is printed, not a "key: value" pair.
        self.assertNotIn("path:", self.client.user_io.out)
    def errors_test(self):
        """Bad sections/keys and malformed set arguments report errors."""
        error = self.client.run("config get whatever", ignore_error=True)
        self.assertTrue(error)
        self.assertIn("'whatever' is not a section of conan.conf", self.client.user_io.out)
        error = self.client.run("config get whatever.what", ignore_error=True)
        self.assertTrue(error)
        self.assertIn("'whatever' is not a section of conan.conf", self.client.user_io.out)
        error = self.client.run("config get storage.what", ignore_error=True)
        self.assertTrue(error)
        self.assertIn("'what' doesn't exist in [storage]", self.client.user_io.out)
        # Setting a whole section at once is rejected.
        error = self.client.run('config set proxies=https:', ignore_error=True)
        self.assertTrue(error)
        self.assertIn("You can't set a full section, please specify a key=value",
                      self.client.user_io.out)
        # 'key:value' (colon instead of '=') is rejected.
        error = self.client.run('config set proxies.http:Value', ignore_error=True)
        self.assertTrue(error)
        self.assertIn("Please specify 'key=value'", self.client.user_io.out)
    def define_test(self):
        """`config set section.key=value` writes the value into conan.conf."""
        self.client.run("config set general.fakeos=Linux")
        conf_file = load(self.client.paths.conan_conf_path)
        self.assertIn("fakeos = Linux", conf_file)
        # Quoted values keep their spaces.
        self.client.run('config set general.compiler="Other compiler"')
        conf_file = load(self.client.paths.conan_conf_path)
        self.assertIn('compiler = Other compiler', conf_file)
        # Dotted keys below the section level are stored verbatim.
        self.client.run('config set general.compiler.version=123.4.5')
        conf_file = load(self.client.paths.conan_conf_path)
        self.assertIn('compiler.version = 123.4.5', conf_file)
        self.assertNotIn("14", conf_file)
        # Trailing whitespace in the value is stripped.
        self.client.run('config set general.new_setting=mysetting ')
        conf_file = load(self.client.paths.conan_conf_path)
        self.assertIn('new_setting = mysetting', conf_file)
        self.client.run('config set proxies.https=myurl')
        conf_file = load(self.client.paths.conan_conf_path)
        self.assertIn("https = myurl", conf_file.splitlines())
    def remove_test(self):
        """`config rm section.key` deletes a single key."""
        self.client.run('config set proxies.https=myurl')
        self.client.run('config rm proxies.https')
        conf_file = load(self.client.paths.conan_conf_path)
        self.assertNotIn('myurl', conf_file)
    def remove_section_test(self):
        """`config rm section` deletes the whole section."""
        self.client.run('config rm proxies')
        conf_file = load(self.client.paths.conan_conf_path)
        self.assertNotIn('[proxies]', conf_file)
    def remove_envvar_test(self):
        """Env-var entries can be set and removed via the [env] section."""
        self.client.run('config set env.MY_VAR=MY_VALUE')
        conf_file = load(self.client.paths.conan_conf_path)
        self.assertIn('MY_VAR = MY_VALUE', conf_file)
        self.client.run('config rm env.MY_VAR')
        conf_file = load(self.client.paths.conan_conf_path)
        self.assertNotIn('MY_VAR', conf_file)
|
{
"content_hash": "9cc64560000d42768ee16f83ff78a983",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 91,
"avg_line_length": 41.793103448275865,
"alnum_prop": 0.6512651265126512,
"repo_name": "luckielordie/conan",
"id": "16db9dd1b02c85d9bfbe83c7a328733415601623",
"size": "3636",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "conans/test/command/config_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "Dockerfile",
"bytes": "3392"
},
{
"name": "Groovy",
"bytes": "7992"
},
{
"name": "Python",
"bytes": "3232431"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
}
|
import time
import flask
import mcbench.xpath
from mcbench.models import Benchmark, Query
from mcbench import querier
from mcbench import app
def redirect(url_name, *args, **kwargs):
    """Shortcut: build the URL for the named route and redirect to it."""
    target = flask.url_for(url_name, *args, **kwargs)
    return flask.redirect(target)
def get_valid_query_or_throw():
    """Return the request's 'query' argument, or None when absent/empty.

    Compiling the expression validates it; an invalid XPath raises
    mcbench.xpath.XPathError for the caller to handle.
    """
    xpath = flask.request.args.get('query') or None
    if xpath is not None:
        mcbench.xpath.compile(xpath)
    return xpath
@app.route('/', methods=['GET'])
def index():
    """Landing page listing all saved queries."""
    saved = Query.saved()
    return flask.render_template('index.html', queries=saved)
@app.route('/help', methods=['GET'])
def help():
    """Static help page.

    Note: the name shadows the builtin `help` but doubles as the Flask
    endpoint name, so it is kept as-is.
    """
    template = 'help.html'
    return flask.render_template(template)
@app.route('/about', methods=['GET'])
def about():
    """Static about page."""
    template = 'about.html'
    return flask.render_template(template)
@app.route('/list', methods=['GET'])
def benchmark_list():
    """List all benchmarks, or those matching the given XPath query."""
    try:
        xpath = get_valid_query_or_throw()
    except mcbench.xpath.XPathError as e:
        flask.flash(str(e), 'error')
        return redirect('index', query=e.query)
    if xpath is None:
        # No query given: plain listing of every benchmark.
        return flask.render_template(
            'list.html', benchmarks=list(Benchmark.all()))
    started = time.time()
    try:
        matches = querier.get_matches(xpath)
    except mcbench.xpath.XPathError as e:
        flask.flash(str(e), 'error')
        return redirect('index', query=e.query)
    elapsed = time.time() - started
    return flask.render_template(
        'search.html',
        show_save_query_form=not Query.find_by_xpath(xpath).is_saved,
        matches=matches,
        query=xpath,
        elapsed_time=elapsed,
        total_matches=sum(m.num_matches for m in matches),
        total_benchmarks=Benchmark.count())
@app.route('/benchmark/<name>', methods=['GET'])
def benchmark(name):
    """Show one benchmark, highlighting the lines matched by the query."""
    bench = Benchmark.find_by_name(name)
    try:
        # Validating the query may raise; fall back to no highlighting.
        query = get_valid_query_or_throw()
        hl_lines = querier.matching_lines(bench, query)
    except mcbench.xpath.XPathError as e:
        flask.flash(str(e), 'error')
        query = None
        hl_lines = querier.matching_lines(bench, None)
    num_matches = sum(len(v['m']) for v in hl_lines.values())
    return flask.render_template(
        'benchmark.html',
        benchmark=bench,
        files=list(bench.files),
        hl_lines=hl_lines,
        num_matches=num_matches,
    )
@app.route('/save_query', methods=['POST'])
def save_query():
    """Attach a user-supplied name to an existing query, marking it saved."""
    xpath = flask.request.values['xpath']
    name = flask.request.values['name']
    query = Query.find_by_xpath(xpath)
    if query is not None:
        query.name = name
        query.save()
        flask.flash("Query '%s' successfully saved." % name, 'info')
    else:
        flask.flash('No such query exists!', 'error')
    return redirect('benchmark_list', query=xpath)
@app.route('/delete_query', methods=['POST'])
def delete_query():
    """Remove a previously saved query."""
    xpath = flask.request.values['xpath']
    query = Query.find_by_xpath(xpath)
    if query is not None:
        saved_name = query.name
        query.unsave()
        flask.flash("Query '%s' successfully deleted." % saved_name, 'info')
    else:
        flask.flash('No such query exists!', 'error')
    return redirect('index')
|
{
"content_hash": "015f4da9789167a576e9095892f95cf1",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 72,
"avg_line_length": 26.94871794871795,
"alnum_prop": 0.6336822074215034,
"repo_name": "isbadawi/mcbench",
"id": "c186e7482ea69581bba38cb879e9626716ca1528",
"size": "3153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcbench/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3906"
},
{
"name": "Matlab",
"bytes": "16472"
},
{
"name": "Python",
"bytes": "23361"
}
],
"symlink_target": ""
}
|
"""
line
~~~~
May the LINE be with you...
:copyright: (c) 2014 by Taehoon Kim.
:license: BSD, see LICENSE for more details.
"""
from .client import LineClient
from .api import LineAPI
from .models import LineGroup, LineContact, LineRoom, LineBase, LineMessage
# Package metadata.
__copyright__ = 'Copyright 2014 by Taehoon Kim'
__version__ = '0.8.2'
__license__ = 'BSD'
__author__ = 'Taehoon Kim'
__author_email__ = 'carpedm20@gmail.com'
__url__ = 'http://github.com/carpedm20/line'

# Public API of the package.
__all__ = [
    # LineClient object
    'LineClient',
    # model wrappers for LINE API
    'LineGroup', 'LineContact', 'LineRoom', 'LineBase', 'LineMessage',
    # Line Thrift API
    'LineAPI',
]
|
{
"content_hash": "eede1b6e483ac36f5a5d7ffe25435aaa",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 24.535714285714285,
"alnum_prop": 0.6375545851528385,
"repo_name": "bot1line1/test8",
"id": "b7ab132a22090f194a9a83503fc60c17f7a2fc49",
"size": "711",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "line/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "101"
},
{
"name": "Python",
"bytes": "42997"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib import auth
import facebook
import datetime
class DjangoFacebook(object):
    """ Simple accessor object for the Facebook user.

    Attributes:
        user: the raw user dict produced by the facebook SDK.
        uid: the Facebook user id pulled out of that dict.
        graph: a facebook.GraphAPI client authenticated with the user's
            access token.
    """
    def __init__(self, user):
        self.user = user
        self.uid = user['uid']
        self.graph = facebook.GraphAPI(user['access_token'])
class FacebookDebugCanvasMiddleware(object):
    """ Emulates signed_request behaviour to test your applications embedding.

    FACEBOOK_DEBUG_SIGNEDREQ should be a raw string as is sent from facebook
    to the server in the POST data, obtained by LiveHeaders, Firebug or
    similar. This should be initialised before FacebookMiddleware.
    """
    def process_request(self, request):
        # request.POST is immutable; swap in a mutable copy carrying the
        # debug signed_request.
        mutable_post = request.POST.copy()
        mutable_post['signed_request'] = settings.FACEBOOK_DEBUG_SIGNEDREQ
        request.POST = mutable_post
        return None
class FacebookDebugCookieMiddleware(object):
    """ Sets an imaginary cookie to ease work from a development environment.

    FACEBOOK_DEBUG_COOKIE should be a raw string as is sent from a browser to
    the server, obtained by LiveHeaders, Firebug or similar. The middleware
    takes care of naming the cookie correctly. This should be initialised
    before FacebookMiddleware.
    """
    def process_request(self, request):
        # The SDK expects the cookie to be named after the app id.
        request.COOKIES["fbs_" + settings.FACEBOOK_APP_ID] = (
            settings.FACEBOOK_DEBUG_COOKIE)
        return None
class FacebookDebugTokenMiddleware(object):
    """ Forces a specific access token to be used.

    This should be used instead of FacebookMiddleware. Make sure you have
    FACEBOOK_DEBUG_UID and FACEBOOK_DEBUG_TOKEN set in your configuration.
    """
    def process_request(self, request):
        fake_user = {
            'uid': settings.FACEBOOK_DEBUG_UID,
            'access_token': settings.FACEBOOK_DEBUG_TOKEN,
        }
        request.facebook = DjangoFacebook(fake_user)
        return None
class FacebookMiddleware(object):
    """ Transparently integrate Django accounts with Facebook.

    If the user presents with a valid facebook cookie, then we want them to
    be automatically logged in as that user. We rely on the authentication
    backend to create the user if it does not exist.

    We do not want to persist the facebook login, so we avoid calling
    auth.login() with the rationale that if they log out via fb:login-button
    we want them to be logged out of Django also.

    We also want to allow people to log in with other backends, which means
    we need to be careful before replacing request.user.
    """
    def get_fb_user_cookie(self, request):
        """ Attempt to find a facebook user using a cookie. """
        fb_user = facebook.get_user_from_cookie(
            request.COOKIES, settings.FACEBOOK_APP_ID,
            settings.FACEBOOK_SECRET_KEY)
        if fb_user:
            fb_user['method'] = 'cookie'
        return fb_user

    def get_fb_user_canvas(self, request):
        """ Attempt to find a user using a signed_request (canvas). """
        signed_request = request.POST.get('signed_request')
        if not signed_request:
            return None
        data = facebook.parse_signed_request(
            signed_request, settings.FACEBOOK_SECRET_KEY)
        if not (data and data.get('user_id')):
            return None
        fb_user = data['user']
        fb_user['method'] = 'canvas'
        fb_user['uid'] = data['user_id']
        fb_user['access_token'] = data['oauth_token']
        return fb_user

    def get_fb_user(self, request):
        """ Return a dict containing the facebook user details, if found.

        The dict contains the auth method, uid, access_token and any other
        information that was made available by the authentication method.
        """
        # First match wins: cookie auth, then canvas signed_request.
        for finder in (self.get_fb_user_cookie, self.get_fb_user_canvas):
            fb_user = finder(request)
            if fb_user:
                return fb_user
        return None

    def process_request(self, request):
        """ Add `facebook` into the request context and attempt to
        authenticate the user.

        If no user was found, request.facebook will be None. Otherwise it
        will contain a DjangoFacebook object with uid, user and graph.

        An attempt to authenticate the user is also made. The fb_uid and
        fb_graphtoken parameters are passed to the AuthenticationBackends.
        The user however is not "logged in" via login() as facebook sessions
        are ephemeral and must be revalidated on every request.
        """
        fb_user = self.get_fb_user(request)
        request.facebook = DjangoFacebook(fb_user) if fb_user else None
        if fb_user and request.user.is_anonymous():
            user = auth.authenticate(
                fb_uid=fb_user['uid'], fb_graphtoken=fb_user['access_token'])
            if user:
                user.last_login = datetime.datetime.now()
                user.save()
                request.user = user
        return None
|
{
"content_hash": "b768cfe97e773ca40268d1da0bd4e765",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 98,
"avg_line_length": 39.57575757575758,
"alnum_prop": 0.6571592649310873,
"repo_name": "aidanlister/django-facebook",
"id": "d46ad906de8136f0eacdc884892fc2629de2ebc3",
"size": "5224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_facebook/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11435"
}
],
"symlink_target": ""
}
|
"""This module contains Splittable DoFn logic that is specific to DirectRunner.
"""
from threading import Lock
from threading import Timer
import apache_beam as beam
from apache_beam import TimeDomain
from apache_beam import pvalue
from apache_beam.io.iobase import RestrictionTracker
from apache_beam.pipeline import PTransformOverride
from apache_beam.runners.common import DoFnContext
from apache_beam.runners.common import DoFnInvoker
from apache_beam.runners.common import DoFnSignature
from apache_beam.runners.common import OutputProcessor
from apache_beam.runners.direct.evaluation_context import DirectStepContext
from apache_beam.runners.direct.util import KeyedWorkItem
from apache_beam.runners.direct.watermark_manager import WatermarkManager
from apache_beam.runners.sdf_common import ElementAndRestriction
from apache_beam.runners.sdf_common import ProcessKeyedElements
from apache_beam.transforms.core import ProcessContinuation
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.trigger import _ValueStateTag
from apache_beam.utils.windowed_value import WindowedValue
class ProcessKeyedElementsViaKeyedWorkItemsOverride(PTransformOverride):
  """A transform override for ProcessElements transform."""

  def matches(self, applied_ptransform):
    transform = applied_ptransform.transform
    return isinstance(transform, ProcessKeyedElements)

  def get_replacement_transform(self, ptransform):
    # Wrap the matched ProcessKeyedElements in the KeyedWorkItem flavour.
    return ProcessKeyedElementsViaKeyedWorkItems(ptransform)
class ProcessKeyedElementsViaKeyedWorkItems(PTransform):
  """A transform that processes Splittable DoFn input via KeyedWorkItems."""

  def __init__(self, process_keyed_elements_transform):
    self._process_keyed_elements_transform = process_keyed_elements_transform

  def expand(self, pcoll):
    # Group by key first, then hand the grouped values to ProcessElements,
    # forwarding the original transform's args/kwargs/side inputs.
    source = self._process_keyed_elements_transform
    process_elements = ProcessElements(source)
    process_elements.args = source.ptransform_args
    process_elements.kwargs = source.ptransform_kwargs
    process_elements.side_inputs = source.ptransform_side_inputs
    return pcoll | beam.core.GroupByKey() | process_elements
class ProcessElements(PTransform):
  """A primitive transform for processing keyed elements or KeyedWorkItems.

  Will be evaluated by
  `runners.direct.transform_evaluator._ProcessElementsEvaluator`.
  """

  def __init__(self, process_keyed_elements_transform):
    self._process_keyed_elements_transform = process_keyed_elements_transform
    self.sdf = process_keyed_elements_transform.sdf

  def expand(self, pcoll):
    # Primitive transform: the output PCollection is produced by the
    # evaluator, so just declare it here.
    return pvalue.PCollection(pcoll.pipeline)

  def new_process_fn(self, sdf):
    transform = self._process_keyed_elements_transform
    return ProcessFn(
        sdf, transform.ptransform_args, transform.ptransform_kwargs)
class ProcessFn(beam.DoFn):
  """A `DoFn` that executes machinery for invoking a Splittable `DoFn`.

  Input to the `ParDo` step that includes a `ProcessFn` will be a
  `PCollection` of `ElementAndRestriction` objects.

  This class is mainly responsible for following.
  (1) setup environment for properly invoking a Splittable `DoFn`.
  (2) invoke `process()` method of a Splittable `DoFn`.
  (3) after the `process()` invocation of the Splittable `DoFn`, determine if
  a re-invocation of the element is needed. If this is the case, set state
  and a timer for a re-invocation and hold output watermark till this
  re-invocation.
  (4) after the final invocation of a given element clear any previous state
  set for re-invoking the element and release the output watermark.
  """

  def __init__(
      self, sdf, args_for_invoker, kwargs_for_invoker):
    self.sdf = sdf
    # Per-key, per-window state cells used across re-invocations.
    self._element_tag = _ValueStateTag('element')
    self._restriction_tag = _ValueStateTag('restriction')
    self.watermark_hold_tag = _ValueStateTag('watermark_hold')
    self._process_element_invoker = None
    self.sdf_invoker = DoFnInvoker.create_invoker(
        DoFnSignature(self.sdf), context=DoFnContext('unused_context'),
        input_args=args_for_invoker, input_kwargs=kwargs_for_invoker)
    self._step_context = None

  @property
  def step_context(self):
    return self._step_context

  @step_context.setter
  def step_context(self, step_context):
    assert isinstance(step_context, DirectStepContext)
    self._step_context = step_context

  def set_process_element_invoker(self, process_element_invoker):
    assert isinstance(process_element_invoker, SDFProcessElementInvoker)
    self._process_element_invoker = process_element_invoker

  def process(self, element, timestamp=beam.DoFn.TimestampParam,
              window=beam.DoFn.WindowParam, *args, **kwargs):
    if isinstance(element, KeyedWorkItem):
      # Must be a timer firing.
      key = element.encoded_key
    else:
      key, values = element
      values = list(values)
      # Each key must carry exactly one value (a WindowedValue or an
      # ElementAndRestriction). Raise instead of `assert` so the check
      # survives `python -O`; this replaces a pair of redundant asserts
      # followed by a dead `raise ValueError('')` with an empty message.
      # TODO: handle key collisions here.
      if len(values) != 1:
        raise ValueError(
            'Internal error. Processing of splittable DoFn cannot continue '
            'since elements did not have unique keys.')
      value = values[0]

    state = self._step_context.get_keyed_state(key)
    element_state = state.get_state(window, self._element_tag)
    # Initially element_state is an empty list, so this distinguishes the
    # first ("seed") invocation from a continuation.
    is_seed_call = not element_state

    if not is_seed_call:
      # Continuation: restore the element and residual restriction stored
      # by the previous invocation (no need to fetch the element again --
      # we already have it in element_state).
      element = element_state
      restriction = state.get_state(window, self._restriction_tag)
      windowed_element = WindowedValue(element, timestamp, [window])
    else:
      # Seed call: the single value is an ElementAndRestriction object.
      assert isinstance(value, ElementAndRestriction)
      element_and_restriction = value
      element = element_and_restriction.element
      restriction = element_and_restriction.restriction

      if isinstance(value, WindowedValue):
        windowed_element = WindowedValue(
            element, value.timestamp, value.windows)
      else:
        windowed_element = WindowedValue(element, timestamp, [window])

    tracker = self.sdf_invoker.invoke_create_tracker(restriction)
    assert self._process_element_invoker
    assert isinstance(self._process_element_invoker,
                      SDFProcessElementInvoker)

    output_values = self._process_element_invoker.invoke_process_element(
        self.sdf_invoker, windowed_element, tracker, *args, **kwargs)

    sdf_result = None
    for output in output_values:
      if isinstance(output, SDFProcessElementInvoker.Result):
        # SDFProcessElementInvoker.Result should be the last item yielded.
        sdf_result = output
        break
      yield output

    assert sdf_result, ('SDFProcessElementInvoker must return a '
                        'SDFProcessElementInvoker.Result object as the last '
                        'value of a SDF invoke_process_element() invocation.')

    if not sdf_result.residual_restriction:
      # All work for current residual and restriction pair is complete.
      state.clear_state(window, self._element_tag)
      state.clear_state(window, self._restriction_tag)
      # Releasing output watermark by setting it to positive infinity.
      state.add_state(window, self.watermark_hold_tag,
                      WatermarkManager.WATERMARK_POS_INF)
    else:
      # More work to do: persist the element and the residual restriction
      # for the re-invocation.
      state.add_state(window, self._element_tag, element)
      state.add_state(window, self._restriction_tag,
                      sdf_result.residual_restriction)
      # Holding output watermark by setting it to negative infinity.
      state.add_state(window, self.watermark_hold_tag,
                      WatermarkManager.WATERMARK_NEG_INF)

      # Setting a timer to be reinvoked to continue processing the element.
      # Currently Python SDK only supports setting timers based on watermark.
      # So forcing a reinvocation by setting a timer for watermark negative
      # infinity.
      # TODO(chamikara): update this by setting a timer for the proper
      # processing time when Python SDK supports that.
      state.set_timer(
          window, '', TimeDomain.WATERMARK,
          WatermarkManager.WATERMARK_NEG_INF)
class SDFProcessElementInvoker(object):
  """A utility that invokes SDF `process()` method and requests checkpoints.

  This class is responsible for invoking the `process()` method of a Splittable
  `DoFn` and making sure that invocation terminated properly. Based on the input
  configuration, this class may decide to request a checkpoint for a `process()`
  execution so that runner can process current output and resume the invocation
  at a later time.

  More specifically, when initializing a `SDFProcessElementInvoker`, caller may
  specify the number of output elements or processing time after which a
  checkpoint should be requested. This class is responsible for properly
  requesting a checkpoint based on either of these criteria.

  When the `process()` call of Splittable `DoFn` ends, this class performs
  validations to make sure that processing ended gracefully and returns a
  `SDFProcessElementInvoker.Result` that contains information which can be used
  by the caller to perform another `process()` invocation for the residual.

  A `process()` invocation may decide to give up processing voluntarily by
  returning a `ProcessContinuation` object (see documentation of
  `ProcessContinuation` for more details). So if a `ProcessContinuation` is
  produced this class ends the execution and performs steps to finalize the
  current invocation.
  """

  class Result(object):
    def __init__(
        self, residual_restriction=None, process_continuation=None,
        future_output_watermark=None):
      """Returned as a result of a `invoke_process_element()` invocation.

      Args:
        residual_restriction: a restriction for the unprocessed part of the
          element.
        process_continuation: a `ProcessContinuation` if one was returned as the
          last element of the SDF `process()` invocation.
        future_output_watermark: output watermark of the results that will be
          produced when invoking the Splittable `DoFn` for the current element
          with `residual_restriction`.
      """
      self.residual_restriction = residual_restriction
      self.process_continuation = process_continuation
      self.future_output_watermark = future_output_watermark

  def __init__(
      self, max_num_outputs, max_duration):
    # max_num_outputs: request a checkpoint once this many elements have been
    # output (falsy disables the count-based trigger).
    # max_duration: seconds after which a timer requests a checkpoint.
    self._max_num_outputs = max_num_outputs
    self._max_duration = max_duration
    # Serializes checkpoint requests coming from the Timer thread and from
    # the output-draining loop below.
    self._checkpoint_lock = Lock()

  # NOTE(review): this looks like leftover debug/test scaffolding - it is not
  # referenced anywhere in this file. Confirm it can be removed.
  def test_method(self):
    raise ValueError

  def invoke_process_element(
      self, sdf_invoker, element, tracker, *args, **kwargs):
    """Invokes `process()` method of a Splittable `DoFn` for a given element.

    Args:
      sdf_invoker: a `DoFnInvoker` for the Splittable `DoFn`.
      element: the element to process
      tracker: a `RestrictionTracker` for the element that will be passed when
        invoking the `process()` method of the Splittable `DoFn`.

    Returns:
      a `SDFProcessElementInvoker.Result` object.
    """
    assert isinstance(sdf_invoker, DoFnInvoker)
    assert isinstance(tracker, RestrictionTracker)

    # Mutable holder shared between this generator and the Timer thread.
    class CheckpointState(object):
      def __init__(self):
        # Sentinel object once a checkpoint has been taken; None before.
        self.checkpointed = None
        self.residual_restriction = None
    checkpoint_state = CheckpointState()

    def initiate_checkpoint():
      # Idempotent: only the first caller actually splits the restriction.
      with self._checkpoint_lock:
        if checkpoint_state.checkpointed:
          return
        checkpoint_state.residual_restriction = tracker.checkpoint()
        checkpoint_state.checkpointed = object()

    output_processor = _OutputProcessor()
    # Time-based checkpoint trigger runs on a separate thread.
    Timer(self._max_duration, initiate_checkpoint).start()
    sdf_invoker.invoke_process(
        element, restriction_tracker=tracker, output_processor=output_processor,
        additional_args=args, additional_kwargs=kwargs)
    assert output_processor.output_iter is not None
    output_count = 0
    # We have to expand and re-yield here to support ending execution for a
    # given number of output elements as well as to capture the
    # ProcessContinuation if one was returned.
    process_continuation = None
    for output in output_values(output_processor) if False else output_processor.output_iter:
      # A ProcessContinuation, if returned, should be the last element.
      assert not process_continuation
      if isinstance(output, ProcessContinuation):
        # Taking a checkpoint so that we can determine primary and residual
        # restrictions.
        initiate_checkpoint()
        # A ProcessContinuation should always be the last element produced by
        # the output iterator.
        # TODO: support continuing after the specified amount of delay.
        # Continuing here instead of breaking to enforce that this is the last
        # element.
        process_continuation = output
        continue
      yield output
      output_count += 1
      # Count-based checkpoint trigger.
      if self._max_num_outputs and output_count >= self._max_num_outputs:
        initiate_checkpoint()
    tracker.check_done()
    result = (
        SDFProcessElementInvoker.Result(
            residual_restriction=checkpoint_state.residual_restriction)
        if checkpoint_state.residual_restriction
        else SDFProcessElementInvoker.Result())
    yield result
class _OutputProcessor(OutputProcessor):
  """Output processor that simply captures the raw output iterator.

  `SDFProcessElementInvoker.invoke_process_element()` drains the captured
  iterator itself so it can count outputs and watch for `ProcessContinuation`.
  """

  def __init__(self):
    # Set by process_outputs(); None until the SDF process() call runs.
    self.output_iter = None

  def process_outputs(self, windowed_input_element, output_iter):
    # Record the iterator for later draining; no eager consumption here.
    self.output_iter = output_iter
|
{
"content_hash": "e48b0ff81c041d93ad50be8478617ed4",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 80,
"avg_line_length": 40.565597667638485,
"alnum_prop": 0.7113698433232716,
"repo_name": "tgroh/beam",
"id": "610664be9232f99911b77251fb0c9c1842962f32",
"size": "14699",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/direct/sdf_direct_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "FreeMarker",
"bytes": "5994"
},
{
"name": "Go",
"bytes": "2167258"
},
{
"name": "Groovy",
"bytes": "127719"
},
{
"name": "Java",
"bytes": "17206671"
},
{
"name": "Python",
"bytes": "3584300"
},
{
"name": "Shell",
"bytes": "82600"
}
],
"symlink_target": ""
}
|
class Sheet(object):
    """Top-level container modelling an entire music sheet.

    Holds the sheet's name plus the ordered list of bars it contains.
    """

    def __init__(self, name):
        super(Sheet, self).__init__()
        self.name = name
        self.bars = []  # populated by callers, one entry per Bar
class Bar(object):
    """A single measure of the sheet.

    Bars compose the sheet's temporal layer: they are where the notes are
    displayed on the sheet.
    """

    def __init__(self, time_signature=4):
        super(Bar, self).__init__()
        self.time_signature = time_signature  # beats per measure
        self.voices = []
class Note(object):
    """The atomic unit of the music representation: a pitch with a duration."""

    def __init__(self, pitch, duration=1):
        super(Note, self).__init__()
        self.pitch = pitch
        self.duration = duration
|
{
"content_hash": "9ec672187da48bc776e215fc7a99a727",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 53,
"avg_line_length": 28.434782608695652,
"alnum_prop": 0.6574923547400612,
"repo_name": "ograndedjogo/tab-translator",
"id": "8de8351086cfee1dafede2805874a1c1f786d11b",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tabtranslator/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10717"
},
{
"name": "Shell",
"bytes": "1443"
}
],
"symlink_target": ""
}
|
import os
import sys


def main():
    """Run Django administrative tasks for the integration-test project.

    Extracted from the ``__main__`` guard (matching the modern Django
    manage.py template) so the entry point is importable and testable.
    Raises ImportError with an actionable message when Django is missing.
    """
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "integration_test.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
{
"content_hash": "56538417d097b77d1c396bf2029230c7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.6237373737373737,
"repo_name": "1024inc/django-rq",
"id": "d3f364966d0ad736d94c40c4b7bfb25d370b2ebd",
"size": "814",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integration_test/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "28218"
},
{
"name": "Makefile",
"bytes": "173"
},
{
"name": "Python",
"bytes": "96432"
}
],
"symlink_target": ""
}
|
import binascii
import sys
class ProtocolTreeNode(object):
    """A node of the XML-like tree used by the WhatsApp wire protocol.

    Each node carries a tag, a dict of string attributes, a list of child
    nodes and an optional data payload (str / bytes / bytearray).
    """

    def __init__(self, tag, attributes=None, children=None, data=None):
        """
        :param tag: element name
        :param attributes: dict of attribute name -> value (kept by reference)
        :param children: list of child ProtocolTreeNode instances
        :param data: optional raw payload
        """
        self.tag = tag
        self.attributes = attributes or {}
        self.children = children or []
        self.data = data
        assert type(self.children) is list, "Children must be a list, got %s" % type(self.children)

    def __eq__(self, protocolTreeNode):
        """Structural equality: same tag, data, attributes and children.

        Children are compared as unordered collections: every child on each
        side must have an equal counterpart on the other side.

        :param protocolTreeNode: ProtocolTreeNode
        :return: bool
        """
        if not (protocolTreeNode.__class__ == ProtocolTreeNode
                and self.tag == protocolTreeNode.tag
                and self.data == protocolTreeNode.data
                and self.attributes == protocolTreeNode.attributes
                and len(self.getAllChildren()) == len(protocolTreeNode.getAllChildren())):
            return False
        # BUG FIX: the original reset its 'found' flag only once per
        # direction, so after the first matching child every remaining child
        # was accepted without actually being compared.
        for c in self.getAllChildren():
            if not any(c == c2 for c2 in protocolTreeNode.getAllChildren()):
                return False
        for c in protocolTreeNode.getAllChildren():
            if not any(c == c2 for c2 in self.getAllChildren()):
                return False
        return True

    def __hash__(self):
        # BUG FIX: use frozenset instead of tuple(items()) so that two nodes
        # that compare equal (dict equality ignores insertion order) also
        # hash equal, as required by the hash/eq contract.
        return hash(self.tag) ^ hash(frozenset(self.attributes.items())) ^ hash(self.data)

    def toString(self):
        """Render the subtree as pseudo-XML text for debugging/logging."""
        out = "<" + self.tag
        if self.attributes is not None:
            for key, val in self.attributes.items():
                out += " " + key + '="' + val + '"'
        out += ">\n"
        if self.data is not None:
            if type(self.data) is bytearray:
                try:
                    out += "%s" % self.data.decode()
                except UnicodeDecodeError:
                    # BUG FIX: hexlify() returns bytes on Python 3; decode it
                    # before concatenating with the str accumulator.
                    out += binascii.hexlify(self.data).decode('ascii')
            else:
                try:
                    out += "%s" % self.data
                except UnicodeDecodeError:
                    try:
                        out += "%s" % self.data.decode()
                    except UnicodeDecodeError:
                        out += binascii.hexlify(self.data).decode('ascii')
                if type(self.data) is str and sys.version_info >= (3, 0):
                    out += "\nHEX3:%s\n" % binascii.hexlify(self.data.encode('latin-1'))
                else:
                    out += "\nHEX:%s\n" % binascii.hexlify(self.data)
        for c in self.children:
            try:
                out += c.toString()
            except UnicodeDecodeError:
                out += "[ENCODED DATA]\n"
        out += "</" + self.tag + ">\n"
        return out

    def __str__(self):
        return self.toString()

    def getData(self):
        """Return the raw data payload (may be None)."""
        return self.data

    def setData(self, data):
        self.data = data

    @staticmethod
    def tagEquals(node, string):
        """True when *node* exists and its tag equals *string*."""
        return node is not None and node.tag is not None and node.tag == string

    @staticmethod
    def require(node, string):
        """Raise when *node* does not carry the expected tag *string*."""
        if not ProtocolTreeNode.tagEquals(node, string):
            raise Exception("failed require. string: " + string)

    # Mapping-style sugar for attribute access: node['id'] etc.
    def __getitem__(self, key):
        return self.getAttributeValue(key)

    def __setitem__(self, key, val):
        self.setAttribute(key, val)

    def __delitem__(self, key):
        self.removeAttribute(key)

    def getChild(self, identifier):
        """Return a child by index (int) or by tag name; None when absent."""
        if type(identifier) == int:
            if len(self.children) > identifier:
                return self.children[identifier]
            else:
                return None
        for c in self.children:
            if identifier == c.tag:
                return c
        return None

    def hasChildren(self):
        return len(self.children) > 0

    def addChild(self, childNode):
        self.children.append(childNode)

    def addChildren(self, children):
        for c in children:
            self.addChild(c)

    def getAttributeValue(self, string):
        """Return the attribute value, or None when the attribute is absent."""
        try:
            return self.attributes[string]
        except KeyError:
            return None

    def removeAttribute(self, key):
        if key in self.attributes:
            del self.attributes[key]

    def setAttribute(self, key, value):
        self.attributes[key] = value

    def getAllChildren(self, tag=None):
        """Return all children, or only those whose tag equals *tag*."""
        ret = []
        if tag is None:
            return self.children
        for c in self.children:
            if tag == c.tag:
                ret.append(c)
        return ret
|
{
"content_hash": "a820b2424fc850f4e2b4dce7b09eaad1",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 99,
"avg_line_length": 29.296296296296298,
"alnum_prop": 0.5004214075010536,
"repo_name": "biji/yowsup",
"id": "f6f1ce9daf6f2713c6276268b8e598c4da35ff2b",
"size": "4746",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "yowsup/structs/protocoltreenode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "222487"
}
],
"symlink_target": ""
}
|
class coin_type:
    """Static description of one cryptocurrency network's encoding constants.

    Bundles the BIP32 magic bytes, address/WIF version bytes and related
    values that the rest of the library uses to serialize keys and addresses.
    """

    _FIELDS = ('name', 'symbol', 'public_magic', 'private_magic', 'bip32_code',
               'public_key_version', 'address_prefix', 'wif_version')

    def __init__(self, name, symbol, public_magic, private_magic, bip32_code,
                 public_key_version, address_prefix, wif_version):
        values = (name, symbol, public_magic, private_magic, bip32_code,
                  public_key_version, address_prefix, wif_version)
        # Assign every constructor argument to the attribute of the same name.
        for attr, value in zip(self._FIELDS, values):
            setattr(self, attr, value)
# Bitcoin
# Magic values are hex strings; 'xpub'/'xprv' comments give the base58 prefix
# the magic bytes produce (presumably per BIP32 - confirm against the spec).
btc = coin_type(
    name = "Bitcoin",
    symbol = "btc",
    public_magic = "0488B21E", # xpub
    private_magic = "0488ADE4", # xprv
    bip32_code = "0",
    public_key_version = "00",
    address_prefix = "1",
    wif_version = "80")

# Bitcoin cash
# Shares Bitcoin's serialization constants; only the coin-type code differs.
bch = coin_type(
    name = "Bitcoin Cash",
    symbol = "bch",
    public_magic = "0488B21E", # xpub
    private_magic = "0488ADE4", # xprv
    bip32_code = "145",
    public_key_version = "00",
    address_prefix = "1",
    wif_version = "80")

# Litecoin
# NOTE(review): address_prefix is empty here while btc/bch use "1" - confirm
# this is intentional for Litecoin address rendering.
ltc = coin_type(
    name = "Litecoin",
    symbol = "ltc",
    public_magic = "019DA462", # Ltub
    private_magic = "019D9CFE", # Ltpv
    bip32_code = "2",
    public_key_version = "30",
    address_prefix = "",
    wif_version = "B0")
|
{
"content_hash": "db471c62742a722bc10369f505c89f7b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 125,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.637719298245614,
"repo_name": "gcnaccount/pywallet",
"id": "6dc221b0394e28a4396e88f1f9504c6938c8151a",
"size": "1140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/coin_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118490"
},
{
"name": "Shell",
"bytes": "10472"
}
],
"symlink_target": ""
}
|
import logging
import random
from celery import Celery
from celery._state import set_default_app
from celery.task import task
from django.conf import settings
from django.utils import timezone
# Module-level Celery app, configured straight from Django settings.
celery = Celery(__name__)
celery.config_from_object(settings)

# Celery should set this app as the default, however the 'celery.current_app'
# api uses threadlocals, so code running in different threads/greenlets uses
# the fallback default instead of this app when no app is specified. This
# causes confusing connection errors when celery tries to connect to a
# non-existent rabbitmq server. It seems to happen mostly when using the
# 'celery.canvas' api. To get around this, we use the internal 'celery._state'
# api to force our app to be the default.
set_default_app(celery)

logger = logging.getLogger(__name__)
@task(ignore_result=True)
def run_status_check(check_or_id):
    """Execute a single StatusCheck, given either a model instance or its pk."""
    from .models import StatusCheck

    check = (check_or_id if isinstance(check_or_id, StatusCheck)
             else StatusCheck.objects.get(id=check_or_id))
    # run() dispatches to the concrete StatusCheck subclass implementation.
    check.run()
@task(ignore_result=True)
def run_all_checks():
    """Enqueue every StatusCheck that is due, jittered over the next minute."""
    from .models import StatusCheck
    from datetime import timedelta

    for check in StatusCheck.objects.all():
        # A check is due when it has never run, or its frequency has elapsed.
        due = True
        if check.last_run:
            due = timezone.now() > check.last_run + timedelta(minutes=check.frequency)
        if due:
            # Random delay spreads the load instead of firing all checks at once.
            delay = random.choice(range(60))
            logger.debug('Scheduling task for %s seconds from now' % delay)
            run_status_check.apply_async((check.id,), countdown=delay)
@task(ignore_result=True)
def update_services(ignore_result=True):
    """Deprecated no-op kept so legacy schedules don't raise import errors."""
    return
@task(ignore_result=True)
def update_service(service_or_id):
    """Recompute the status of one Service, given a model instance or its pk."""
    from .models import Service

    service = (service_or_id if isinstance(service_or_id, Service)
               else Service.objects.get(id=service_or_id))
    service.update_status()
@task(ignore_result=True)
def update_instance(instance_or_id):
    """Recompute the status of one Instance, given a model instance or its pk."""
    from .models import Instance

    instance = (instance_or_id if isinstance(instance_or_id, Instance)
                else Instance.objects.get(id=instance_or_id))
    instance.update_status()
@task(ignore_result=True)
def update_shifts():
    """Refresh the on-call shift assignments via the model layer."""
    from .models import update_shifts as refresh_shifts
    refresh_shifts()
@task(ignore_result=True)
def clean_db(days_to_retain=60):
    """
    Clean up database otherwise it gets overwhelmed with StatusCheckResults.

    Deletes StatusCheckResult / ServiceStatusSnapshot rows older than
    *days_to_retain* in batches of 100, re-enqueuing itself until both tables
    are pruned. Spawning fresh tasks keeps each db connection short-lived.
    """
    from .models import StatusCheckResult, ServiceStatusSnapshot
    from datetime import timedelta

    retention = timedelta(days=days_to_retain)
    stale_results = StatusCheckResult.objects.filter(time__lte=timezone.now() - retention)
    stale_snapshots = ServiceStatusSnapshot.objects.filter(time__lte=timezone.now() - retention)

    result_ids = stale_results.values_list('id', flat=True)[:100]
    snapshot_ids = stale_snapshots.values_list('id', flat=True)[:100]

    if not result_ids:
        logger.info('Completed deleting StatusCheckResult objects')
    if not snapshot_ids:
        logger.info('Completed deleting ServiceStatusSnapshot objects')
    if (not snapshot_ids) and (not result_ids):
        return

    logger.info('Processing %s StatusCheckResult objects' % len(result_ids))
    logger.info('Processing %s ServiceStatusSnapshot objects' % len(snapshot_ids))

    StatusCheckResult.objects.filter(id__in=result_ids).delete()
    ServiceStatusSnapshot.objects.filter(id__in=snapshot_ids).delete()

    # Re-schedule to process the next batch shortly.
    clean_db.apply_async(kwargs={'days_to_retain': days_to_retain}, countdown=3)
|
{
"content_hash": "895689ad1bc466e505c4a216a36e8e24",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 120,
"avg_line_length": 34.473214285714285,
"alnum_prop": 0.7184667184667185,
"repo_name": "bonniejools/cabot",
"id": "3fc4d857fc97dac04b446762443bb4612a75951d",
"size": "3861",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cabot/cabotapp/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21910"
},
{
"name": "HTML",
"bytes": "76160"
},
{
"name": "JavaScript",
"bytes": "368548"
},
{
"name": "Python",
"bytes": "154400"
},
{
"name": "Shell",
"bytes": "7312"
}
],
"symlink_target": ""
}
|
import os

import rrt

LOG = rrt.get_log()
LOG.info("Starting %s" % rrt.get_version())

from rrt.hpc import env
ENV = env()

from maya import cmds


def posix(path):
    """Normalize a path to forward slashes for Maya's dirmap."""
    return path.replace('\\', '/')


proj = posix(ENV['PROJECT'])
node_proj = posix(ENV['NODE_PROJECT'])

# Remap the render node's view of the project back onto the local project,
# plus every top-level project subdirectory under its //-style aliases.
map_pairs = [(node_proj, proj)]
for name in os.listdir(ENV['PROJECT']):
    full = os.path.join(ENV['PROJECT'], name)
    if not os.path.isdir(full):
        continue
    local = posix(full)
    map_pairs.extend([
        ('//' + name, local),
        (node_proj + '/' + name, local),
        (node_proj + '//' + name, local),
    ])

LOG.debug("Dirmaps:")
for pair in map_pairs:
    LOG.debug(pair)
    cmds.dirmap(mapDirectory=pair)
cmds.dirmap(enable=True)
|
{
"content_hash": "1b1d411f5aa33b962365636c8cf5e578",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 60,
"avg_line_length": 23.516129032258064,
"alnum_prop": 0.6008230452674898,
"repo_name": "RCAD/ringling-render-tools",
"id": "f610821eec0fe6ab6f583e4c3a34cadd37c56d94",
"size": "729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rrt/hpc/maya/startup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114289"
}
],
"symlink_target": ""
}
|
""" Evologics modem driver
This is a ROS node which works as a bridge between the software and the Evologics devices.
S2C product family is supported by this driver, both "normal" and "USBL" versions, both serial and tcp/ip connection protocols.
"""
# TODO: Test im, burst sending
# TODO: Add documentation
from __future__ import division
__author__ = 'nick'
import argparse
import traceback
import numpy as np # do I need numpy???
np.set_printoptions(precision=3, suppress=True)
import serial
import time
import sys
#ROS imports
import roslib
roslib.load_manifest('evologics_driver')
import rospy
import evologics_driver as ed
from vehicle_interface.msg import AcousticModemStatus, AcousticModemPayload, AcousticModemAck, AcousticModemUSBLANGLES, AcousticModemUSBLLONG
from diagnostic_msgs.msg import KeyValue
from vehicle_interface.srv import BooleanService, BooleanServiceResponse
# modem status
MODEM_DISABLED = 0
MODEM_ENABLED = 1

# generic return codes used by eval_error_code()
OK = 0
ERROR = 1

TIMEOUT_PROP_READ = 5

# numeric status -> AcousticModemStatus message constant
MODEM_STATUS = {
    MODEM_DISABLED: AcousticModemStatus.MODEM_DISABLED,
    MODEM_ENABLED: AcousticModemStatus.MODEM_ENABLED
}

# ros topics
# instant message
TOPIC_IM_IN = 'im/in'
TOPIC_IM_OUT = 'im/out'
TOPIC_IM_ACK = 'im/ack'
# synchronous instant message
TOPIC_IMS_IN = 'ims/in'
TOPIC_IMS_OUT = 'ims/out'
TOPIC_IMS_ACK = 'ims/ack'
# burst message
TOPIC_BURST_IN = 'burst/in'
TOPIC_BURST_OUT = 'burst/out'
TOPIC_BURST_ACK = 'burst/ack'
# modem status
TOPIC_STATUS = 'status'
# USBL data
# NOTE(review): constant name has a triple 'L' (USBLLLONG) - harmless but
# inconsistent with the topic string; renaming would touch all users.
TOPIC_USBLLLONG = "usbllong"
TOPIC_USBLANGLES = "usblangles"
# modem switch service
SRV_SWITCH = 'switch'
SRV_BATTERY = 'battery'

# default configs
DEFAULT_SERIAL = {
    'port': '/dev/ttyUSB0',
    'baudrate': 19200,
    'parity': serial.PARITY_NONE,
    'stopbits': serial.STOPBITS_ONE,
    'timeout': 0.1,
    'bytesize': serial.EIGHTBITS
}

DEFAULT_TCP = {
    'ip': '192.168.0.212', # Device IP (192.168.0.212 is the USBL)
    'port': 9200, # Port number
    'type': 'lr', # 'lr': lister-respawn, 'ls': listen-single, 'c': client
}

DEFAULT_CONFIG = {
    'connection_type': "SERIAL", # "TCP/IP" or "SERIAL"
    'read_rate': 10, # Hz
    'battery_read_rate': 0.01, # Hz
    'status_rate': 0.1, # Hz
    'source_level': 3, # From 0 (max) to 3 (min)
    'serial_config': DEFAULT_SERIAL,
    'tcp_config': DEFAULT_TCP
}

# define the length of incoming data messages (in tokens)
DATA_MSGS_LENGTHS = {
    'IM': 64,
    'BURST': 1024,
    'RECV': 10,
    'RECVIM': 10,
    'RECVIMS': 10,
    'RECVPBM': 9
}
class ModemNode(object):
"""ModemDriver class represents the ROS interface for the Evologics Acoustic Modems
This class implements all the required functionality to communicate with the modems. It allows transmission and
reception of different types of messages as described in the user manual. It also provides localisation
information for USBL modems and modem status information.
"""
    def __init__(self, name, config, **kwargs):
        """ Class initialization method
        This method sets the node parameters: node name, update rate, etc...
        It also initializes publishers and subscribers and opens the connection with the device
        :param name: name of the node
        :param config: dictionary with the configuration (same structure as DEFAULT_CONFIG)
        :param kwargs: verbose
        """
        self.name = name
        # message counters feeding the success-rate stats in send_status()
        self.sent_im_ack_cnt = 0
        self.sent_burst_cnt = 0
        self.received_cnt = 0
        self.im_ack_cnt = 0
        self.burst_ack_cnt = 0
        # NOTE(review): no else-branch - an unknown 'connection_type' leaves
        # self.modem unset and the loginfo below raises AttributeError;
        # confirm config is validated upstream.
        if config['connection_type'] == "SERIAL":
            self.modem = ed.SerialModemDriver(serial_config=config['serial_config'])
        elif config['connection_type'] == "TCP/IP":
            self.modem = ed.TCPModemDriver(tcp_config=config['tcp_config'])
        rospy.loginfo('%s: Modem connected' % self.name)
        line = self.modem.set_source_level(config['source_level'])
        rospy.loginfo('%s: Received line: %s' % (self.name, repr(line)))
        self.verbose = kwargs.get('verbose', False)
        # Dictionary containing the message types sent by the device, when called it redirects you to the proper method
        # TODO complete the dictionary for all device inputs
        # TODO: add default
        self.dict_messages_types = {
            'RECV': self.parse_acoustic_bm,
            'DELIVERED': self.parse_delivered,
            'FAILED': self.parse_failed,
            'RECVIM': self.parse_acoustic_im,
            'DELIVEREDIM': self.parse_delivered,
            'FAILEDIM': self.parse_failed,
            'USBLLONG': self.parse_usbllong,
            'USBLANGLES': self.parse_usblangles,
            'OK': self.parse_ok,
            '[*]OK': self.parse_ok
        }
        # timing
        self.read_rate = config['read_rate']
        self.dt_read = 1.0 / self.read_rate
        self.driver_loop = rospy.Rate(self.read_rate)
        # TODO: Timers are not supported on ROS diamondback [Emily boat]
        # self.driver_status = rospy.Timer(rospy.Duration(1.0 / config['status_rate']), self.send_status)
        # self.battery_read = rospy.Timer(rospy.Duration(1.0 / config['battery_read_rate']), ___)
        # Manual scheduling (instead of rospy.Timer, see TODO above):
        # eval_modem_status()/eval_voltage_scheduler() compare against these.
        self.status_period = 1.0/config['status_rate']
        self.last_status_t = 0
        self.battery_read_period = 1/config['battery_read_rate']
        self.last_battery_t = 0
        # initial status
        self.modem_status = MODEM_ENABLED
        self.battery_level = 0
        # set on IM delivery, consumed by eval_propagation_time()
        self.propagation_time_flag = False
        self.propagation_time = 0
        prefix = self.name + '/'
        # instant message publishers and subscribers
        self.pub_im_in = rospy.Publisher(prefix + TOPIC_IM_IN, AcousticModemPayload, tcp_nodelay=True, queue_size=1)
        self.pub_im_ack = rospy.Publisher(prefix + TOPIC_IM_ACK, AcousticModemAck, tcp_nodelay=True, queue_size=1)
        self.sub_im_out = rospy.Subscriber(prefix + TOPIC_IM_OUT, AcousticModemPayload, self.handle_im_out, tcp_nodelay=True, queue_size=1)
        # synchronous instant message publishers and subscribers
        self.pub_ims_in = rospy.Publisher(prefix + TOPIC_IMS_IN, AcousticModemPayload, tcp_nodelay=True, queue_size=1)
        self.pub_ims_ack = rospy.Publisher(prefix + TOPIC_IMS_ACK, AcousticModemAck, tcp_nodelay=True, queue_size=1)
        self.sub_ims_out = rospy.Subscriber(prefix + TOPIC_IMS_OUT, AcousticModemPayload, self.handle_ims_out, tcp_nodelay=True, queue_size=1)
        # burst message publishers and subscribers
        self.pub_burst_in = rospy.Publisher(prefix + TOPIC_BURST_IN, AcousticModemPayload, tcp_nodelay=True, queue_size=1)
        self.pub_burst_ack = rospy.Publisher(prefix + TOPIC_BURST_ACK, AcousticModemAck, tcp_nodelay=True, queue_size=1)
        self.sub_burst_out = rospy.Subscriber(prefix + TOPIC_BURST_OUT, AcousticModemPayload, self.handle_burst_out, tcp_nodelay=True, queue_size=1)
        # USBL data publishers
        self.pub_usbllong = rospy.Publisher(prefix + TOPIC_USBLLLONG, AcousticModemUSBLLONG, tcp_nodelay=True, queue_size=1)
        self.pub_usblangles = rospy.Publisher(prefix + TOPIC_USBLANGLES, AcousticModemUSBLANGLES, tcp_nodelay=True, queue_size=1)
        # status publisher
        self.pub_status = rospy.Publisher(prefix + TOPIC_STATUS, AcousticModemStatus, tcp_nodelay=True, queue_size=1)
        # services
        self.s_switch = rospy.Service(SRV_SWITCH, BooleanService, self.srv_switch)
        # optional info
        if self.verbose:
            t_pri = rospy.Timer(rospy.Duration(0.5), self.print_info)
def handle_im_out(self, msg):
"""
ROS callback
Send an instant message to the device
:param msg: ROS message, type: AcousticModemPayload
"""
if self.modem_status:
err_code, cmd_sent = self.modem.add_im_to_buffer(msg.address, msg.ack, msg.payload)
self.eval_error_code(err_code, cmd_sent)
if err_code in [ed.OK, ed.WARN_ACK_BUSY]:
self.sent_im_ack_cnt += 1
self.modem.send_line()
else:
rospy.warn("%s: Modem is disabled! Command ignored" % self.name)
    def handle_ims_out(self, msg):
        """
        ROS callback for synchronous instant messages.
        Not implemented yet: the message is currently dropped.
        :param msg: ROS message, type: AcousticModemPayload
        """
        # Publish to serial port
        # To be done
        pass
def handle_burst_out(self, msg):
"""
ROS callbackself.propagation_time
Send an burst message to the device
:param msg: ROS message, type: AcousticModemPayload
"""
if self.modem_status:
err_code, cmd_sent = self.modem.add_burst_to_buffer(msg.address, msg.payload)
self.eval_error_code(err_code, cmd_sent)
# print self.sent_burst_cnt
if err_code in [ed.OK, ed.WARN_ACK_BUSY]:
self.sent_burst_cnt += 1
else:
rospy.warn("%s: Modem is disabled! Command ignored" % self.name)
# safety switch service
def srv_switch(self, req):
"""This method handles the switch service.
This will enable/disable the modem driver.
"""
if req.request is True:
# enable the low-level controller
self.modem_status = MODEM_ENABLED
return BooleanServiceResponse(True)
else:
self.modem_status = MODEM_DISABLED
return BooleanServiceResponse(False)
# status publisher
def send_status(self, event=None):
msg = AcousticModemStatus()
msg.header.stamp = rospy.Time.now()
msg.status = MODEM_STATUS[self.modem_status]
msg.battery_level = self.battery_level
# print 'im_ack_cnt', self.im_ack_cnt
# print 'burst_ack_cnt', self.burst_ack_cnt
if self.sent_im_ack_cnt > 0 and self.sent_burst_cnt > 0:
info = {
'im_ack_success_rate': str(self.im_ack_cnt/self.sent_im_ack_cnt),
'im_burst_rate': str(self.burst_ack_cnt/self.sent_burst_cnt),
'total_sent': str(self.sent_burst_cnt + self.sent_im_ack_cnt),
'total_received': str(self.received_cnt)
}
msg.info = [KeyValue(key, value) for key, value in info.items()]
self.pub_status.publish(msg)
# Parsing methods
    def parse_acoustic_bm(self, tokens):
        """
        This method parses an incoming burst message (RECV) and publishes its
        content on the burst input topic.
        :param tokens: list of message fields
        """
        msg = AcousticModemPayload()
        msg.header.stamp = rospy.Time.now()
        # Field positions follow the device's RECV notification layout -
        # presumably the EvoLogics S2C format; confirm against the manual.
        msg.address = int(tokens[2])
        msg.bitrate = int(tokens[4])
        msg.rssi = float(tokens[5])
        msg.integrity = float(tokens[6])
        msg.propagation_time = int(tokens[7])
        self.propagation_time = int(tokens[7])
        # print 'Distance', self.propagation_time * 1500 * 10**-6
        msg.relative_velocity = float(tokens[8])
        # Payload may itself contain commas, so re-join the tail tokens.
        msg.payload = ','.join(tokens[9:])
        # NOTE(review): msg.info is logged but never populated here.
        rospy.loginfo('%s: Received burst message from node %s, payload: %s, info: %s' % (self.name, msg.address, repr(msg.payload), repr(msg.info)))
        self.received_cnt += 1
        self.pub_burst_in.publish(msg)
    def parse_acoustic_im(self, tokens):
        """
        This method parses an incoming instant message (RECVIM) and publishes
        its content on the IM input topic.
        :param tokens: list of message fields
        """
        msg = AcousticModemPayload()
        msg.header.stamp = rospy.Time.now()
        # Field positions follow the device's RECVIM notification layout -
        # presumably the EvoLogics S2C format; confirm against the manual.
        msg.address = int(tokens[2])
        msg.duration = int(tokens[5])
        msg.rssi = float(tokens[6])
        msg.integrity = float(tokens[7])
        msg.relative_velocity = float(tokens[8])
        # Payload may itself contain commas, so re-join the tail tokens.
        msg.payload = ','.join(tokens[9:])
        # NOTE(review): msg.info is logged but never populated here.
        rospy.loginfo('%s: Received IM from node %s, payload: %s, info: %s' %(self.name, msg.address, repr(msg.payload), repr(msg.info)))
        self.received_cnt += 1
        self.pub_im_in.publish(msg)
    def parse_usbllong(self, tokens):
        """
        Parse a USBLLONG positioning notification and publish it as a ROS
        AcousticModemUSBLLONG message.
        :param tokens: list of message fields (expects exactly 17)
        """
        if len(tokens) != 17:
            rospy.logerr("%s: USBLLONG message fields count doesn't match" % self.name)
            return
        usblmsg = AcousticModemUSBLLONG()
        usblmsg.header.stamp = rospy.Time.now()
        usblmsg.measurement_time = float(tokens[2])
        usblmsg.remote_address = int(tokens[3])
        # X/Y/Z and E/N/U position components - presumably device-frame and
        # east/north/up coordinates; confirm against the device manual.
        usblmsg.X = float(tokens[4])
        usblmsg.Y = float(tokens[5])
        usblmsg.Z = float(tokens[6])
        usblmsg.E = float(tokens[7])
        usblmsg.N = float(tokens[8])
        usblmsg.U = float(tokens[9])
        usblmsg.roll = float(tokens[10]) # RPY of local device
        usblmsg.pitch = float(tokens[11])
        usblmsg.yaw = float(tokens[12])
        usblmsg.propagation_time = float(tokens[13])
        # Accuracy is the last field (tokens[14:16] are skipped).
        usblmsg.accuracy = float(tokens[-1])
        rospy.loginfo('%s: Received USBLLONG data' %(self.name))
        self.received_cnt += 1
        self.pub_usbllong.publish(usblmsg)
    def parse_usblangles(self, tokens):
        """
        Parse a USBLANGLES bearing/elevation notification and publish it as a
        ROS AcousticModemUSBLANGLES message.
        :param tokens: list of message fields (expects exactly 14)
        """
        if len(tokens) != 14:
            rospy.logerr("%s: USBLANGLES message fields count doesn't match" % self.name)
            return
        usblmsg = AcousticModemUSBLANGLES()
        usblmsg.header.stamp = rospy.Time.now()
        usblmsg.measurement_time = float(tokens[2])
        usblmsg.remote_address = int(tokens[3])
        # lbearing/lelevation vs bearing/elevation - presumably local-frame
        # vs compensated angles; confirm against the device manual.
        usblmsg.lbearing = float(tokens[4])
        usblmsg.lelevation = float(tokens[5])
        usblmsg.bearing = float(tokens[6])
        usblmsg.elevation = float(tokens[7])
        usblmsg.roll = float(tokens[8]) # RPY of local device
        usblmsg.pitch = float(tokens[9])
        usblmsg.yaw = float(tokens[10])
        # Accuracy is the last field (tokens[11:13] are skipped).
        usblmsg.accuracy = float(tokens[13])
        rospy.loginfo('%s: Received USBLANGLES data' %(self.name))
        self.received_cnt += 1
        self.pub_usblangles.publish(usblmsg)
def parse_delivered(self, tokens):
rospy.loginfo("%s: Message delivered to node %s" %(self.name,tokens[1]))
# print self.modem.last_ack_type
if self.modem.last_ack_type == "imack":
self.propagation_time_flag = True # to check propagation time later
self.im_ack_cnt += 1
msg = AcousticModemAck()
msg.header.stamp = rospy.Time.now()
msg.ack = True
self.pub_im_ack.publish(msg)
elif self.modem.last_ack_type == "burstack":
self.burst_ack_cnt += 1
msg = AcousticModemAck()
msg.header.stamp = rospy.Time.now()
msg.ack = True
self.pub_burst_ack.publish(msg)
self.modem.last_ack_type = ""
def parse_failed(self, tokens):
rospy.logwarn("%s: Message delivering failed to node %s" %(self.name,tokens[1]))
if self.modem.last_ack_type == "imack":
msg = AcousticModemAck()
msg.header.stamp = rospy.Time.now()
msg.ack = False
self.pub_im_ack.publish(msg)
elif self.modem.last_ack_type == "burstack":
msg = AcousticModemAck()
msg.header.stamp = rospy.Time.now()
msg.ack = False
self.pub_burst_ack.publish(msg)
self.modem.last_ack_type = ""
    def parse_ok(self,tokens):
        """Acknowledge an 'OK'/'[*]OK' response from the device (no action)."""
        pass
def eval_error_code(self, code, cmd_sent):
if code == ed.ERROR_DATA_TOO_LONG:
rospy.logerr("%s: Data too long! Command ignored!" % self.name)
elif code == ed.ERROR_MODEM_BUSY:
rospy.logerr("%s: Device busy! Command ignored!" % self.name)
elif code == ed.WARN_ACK_BUSY:
rospy.logwarn("%s: Still waiting for previous ack but starting new one")
elif code == ed.OK:
# print self.name, cmd_sent
rospy.loginfo("%s: Command sent: %s" % (self.name, cmd_sent))
return OK
return ERROR
def eval_voltage_scheduler(self):
    """Periodically poll the modem battery level.

    Runs at most once every ``self.battery_read_period`` seconds; sends the
    read command, waits briefly for the device to answer, then parses the
    response into ``self.battery_level``.
    """
    time_since_measurement = rospy.Time.now().to_sec() - self.last_battery_t
    if time_since_measurement > self.battery_read_period:
        self.last_battery_t = rospy.Time.now().to_sec()
        code, cmd = self.modem.read_battery_level()
        self.eval_error_code(code, cmd)
        time.sleep(0.1)  # give the device time to answer before reading
        self.modem.read_device_to_buffer()
        try:
            line = self.modem.read_buffer.pop()
            self.battery_level = float(line)
        except ValueError:
            # Response was not a number: probably an unrelated line arrived
            # first; hand it to the normal parser instead of dropping it.
            rospy.logwarn("%s: Battery read failed - value not updated. Command-response mismatch?" % self.name)
            self.parse_line(line)
        except IndexError:
            # BUG FIX: message said "Propagation time read failed" - a
            # copy-paste from eval_propagation_time(); this is the battery read.
            rospy.logwarn("%s: Battery read failed - value not updated. Modem not connected?" % self.name)
def eval_propagation_time(self):
    """Read the acoustic propagation time after an instant-message ack.

    Only acts when ``propagation_time_flag`` was set by parse_delivered().
    The reported value appears to be in microseconds (it is scaled by 1e-6
    below) -- TODO confirm against the modem manual.
    """
    if self.propagation_time_flag:
        code, cmd = self.modem.read_propagation_time()
        self.eval_error_code(code, cmd)
        time.sleep(0.1)  # give the device time to answer before reading
        self.modem.read_device_to_buffer()
        try:
            line = self.modem.read_buffer.pop()
            self.propagation_time = float(line)
            # distance estimate assuming ~1500 m/s sound speed and time in us
            print 'Distance', self.propagation_time * 1500 * 10**-6
            self.propagation_time_flag = False
        except ValueError:
            # Response was not a number: hand the line to the normal parser
            rospy.logwarn("%s: Propagation time read failed - value not updated. Command-response mismatch?" % self.name)
            self.parse_line(line)
        except IndexError:
            # Nothing in the buffer at all
            rospy.logwarn("%s: Propagation time read failed - value not updated. Modem not connected?" % self.name)
def eval_modem_status(self):
    """Publish a status report once every ``self.status_period`` seconds."""
    elapsed = rospy.Time.now().to_sec() - self.last_status_t
    if elapsed > self.status_period:
        self.last_status_t = rospy.Time.now().to_sec()
        self.send_status()
def parse_line(self, line):
    """Dispatch one raw line from the modem to its registered handler.

    The first comma-separated token selects the handler from
    ``self.dict_messages_types``; unknown commands are logged and dropped.
    Called from the main loop at the rate set by the read_rate parameter.
    """
    rospy.loginfo('%s: Received line: %s' % (self.name, repr(line)))
    tokens = line.split(',')
    handler = self.dict_messages_types.get(tokens[0])
    if handler is not None:
        handler(tokens)
    else:
        rospy.logwarn('%s: Failed to parse line: %s' % (self.name, line))
def parse_buffer(self):
    """Drain the modem read buffer, parsing every pending line."""
    self.modem.read_device_to_buffer()
    while self.modem.read_buffer:
        self.parse_line(self.modem.read_buffer.pop())
def run(self):
    """Main driver loop: poll the modem, run schedulers, sleep until shutdown."""
    # driver loop
    while not rospy.is_shutdown():
        self.parse_buffer()
        self.eval_voltage_scheduler()
        # parse again: the battery poll above may have queued new lines
        self.parse_buffer()
        self.eval_propagation_time()
        self.modem.send_line()
        self.eval_modem_status()
        try:
            self.driver_loop.sleep()
        except rospy.ROSInterruptException:
            rospy.loginfo('%s shutdown requested ...', self.name)
    # NOTE(review): close placed after the loop (runs on shutdown) --
    # indentation was ambiguous in the source dump; confirm placement.
    self.modem.close()
# verbose info print
def print_info(self, event=None):
    """Print the driver state; ``event`` allows use as a rospy Timer callback."""
    print(self)

def __str__(self):
    # NOTE(review): reports only the enabled flag; literal's internal
    # whitespace reconstructed from an indentation-stripped dump.
    return """modem:
enabled: %s
""" % (
        self.modem_status
    )
def main():
    """Entry point: parse CLI args, load config, spin up the driver node."""
    rospy.init_node('modem_driver')
    name = rospy.get_name()
    rospy.loginfo('%s initializing ...', name)

    # parse args (rospy.myargv()[0] is always the program name)
    cli = argparse.ArgumentParser(
        description='Modem Driver ROS Node. This node is communicating with the Evologics modem.',
        #epilog='This is part of vehicle_pilot module.'
    )
    cli.add_argument('-v', '--verbose', action='store_true', help='Print detailed information.')
    opts = cli.parse_args(rospy.myargv()[1:])

    # merge user-specified parameters over the defaults and report them
    config = DEFAULT_CONFIG.copy()
    config.update(rospy.get_param('~modem_config', {}))
    rospy.loginfo('%s modem config: %s', name, config)

    # start the driver node; log any uncaught exception before dying
    driver = ModemNode(name, config, verbose=opts.verbose)
    try:
        driver.run()
    except Exception:
        tb = traceback.format_exc()
        rospy.logfatal('%s uncaught exception, dying!\n%s', name, tb)


if __name__ == '__main__':
    main()
|
{
"content_hash": "ed49d4e6eb6719d8334e40ff825e1a83",
"timestamp": "",
"source": "github",
"line_count": 553,
"max_line_length": 149,
"avg_line_length": 37.32911392405063,
"alnum_prop": 0.6130891827738216,
"repo_name": "oceansystemslab/evologics_driver",
"id": "0b5fc0ae55c534b49d722c5ba2c1eb3ba9816c3e",
"size": "20689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/evologics_node.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "1203"
},
{
"name": "Python",
"bytes": "71771"
},
{
"name": "Shell",
"bytes": "1568"
}
],
"symlink_target": ""
}
|
"""Generate Crosswalk Extension source files.
If run itself, caches Jinja templates (and creates dummy file for build,
since cache filenames are unpredictable and opaque).
This module is *not* concurrency-safe without care: bytecode caching creates
a race condition on cache *write* (crashes if one process tries to read a
partially-written cache). However, if you pre-cache the templates (by running
the module itself), then you can parallelize compiling individual files, since
cache *reading* is safe.
Input: An object of class IdlDefinitions, containing an IDL interface X
Output: X.js and X.java for interfaces (or X.h/X.cpp for dictionary impls)
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import os
import posixpath
import re
import sys
# Path handling for libraries and templates
# Paths have to be normalized because Jinja uses the exact template path to
# determine the hash used in the cache filename, and we need a pre-caching step
# to be concurrency-safe. Use absolute path because __file__ is absolute if
# module is imported, and relative if executed directly.
# If paths differ between pre-caching and individual file compilation, the cache
# is regenerated, which causes a race condition and breaks concurrent build,
# since some compile processes will try to read the partially written cache.
# Split off this module's directory and filename once; both are reused below.
module_path, module_filename = os.path.split(os.path.realpath(__file__))
# chromium's third_party directory lives four levels above this script
third_party_dir = os.path.normpath(os.path.join(
    module_path, os.pardir, os.pardir, os.pardir, os.pardir))
# the Jinja templates live in ../templates relative to this script
templates_dir = os.path.normpath(os.path.join(
    module_path, os.pardir, 'templates'))
# Make sure extension is .py, not .pyc or .pyo, so doesn't depend on caching
module_pyname = os.path.splitext(module_filename)[0] + '.py'
# jinja2 is in chromium's third_party directory.
# Insert at 1 so at front to override system libraries, and
# after path[0] == invoking script dir
sys.path.insert(1, third_party_dir)
import jinja2
import idl_types
from idl_types import IdlType
import ext_callback_interface
import ext_dictionary
from ext_globals import includes, interfaces
import ext_interface
import ext_types
from ext_utilities import capitalize, cpp_name, conditional_string, ext_class_name
KNOWN_COMPONENTS = frozenset(['core', 'modules'])
def render_template(interface_info, header_template, cpp_template,
                    template_context):
    """Render the header/cpp (or js/java) template pair for one definition.

    Records this generator's filename into the context, sorts include lists
    for deterministic output, and folds the definition's dependency include
    paths into the shared ``includes`` set before rendering.
    """
    template_context['code_generator'] = module_pyname

    # Sorted includes keep the generated output stable across runs
    template_context['header_includes'] = sorted(
        template_context['header_includes'])
    includes.update(interface_info.get('dependencies_include_paths', []))
    template_context['cpp_includes'] = sorted(includes)

    rendered_header = header_template.render(template_context)
    rendered_cpp = cpp_template.render(template_context)
    return rendered_header, rendered_cpp
class CodeGeneratorBase(object):
    """Base class for ext bindings generator and IDL dictionary impl generator"""

    def __init__(self, interfaces_info, cache_dir, output_dir):
        # interfaces_info maps definition name -> info dict from the info
        # collection step; may legitimately be None/empty.
        interfaces_info = interfaces_info or {}
        self.interfaces_info = interfaces_info
        self.jinja_env = initialize_jinja_env(cache_dir)
        self.output_dir = output_dir

        # Set global type info: registered on the shared IdlType/idl_types
        # state so type resolution works across all generated files.
        idl_types.set_ancestors(dict(
            (interface_name, interface_info['ancestors'])
            for interface_name, interface_info in interfaces_info.iteritems()
            if interface_info['ancestors']))
        IdlType.set_callback_interfaces(set(
            interface_name
            for interface_name, interface_info in interfaces_info.iteritems()
            if interface_info['is_callback_interface']))
        IdlType.set_dictionaries(set(
            dictionary_name
            for dictionary_name, interface_info in interfaces_info.iteritems()
            if interface_info['is_dictionary']))
        IdlType.set_implemented_as_interfaces(dict(
            (interface_name, interface_info['implemented_as'])
            for interface_name, interface_info in interfaces_info.iteritems()
            if interface_info['implemented_as']))
        IdlType.set_garbage_collected_types(set(
            interface_name
            for interface_name, interface_info in interfaces_info.iteritems()
            if 'GarbageCollected' in interface_info['inherited_extended_attributes']))
        IdlType.set_will_be_garbage_collected_types(set(
            interface_name
            for interface_name, interface_info in interfaces_info.iteritems()
            if 'WillBeGarbageCollected' in interface_info['inherited_extended_attributes']))
        ext_types.set_component_dirs(dict(
            (interface_name, interface_info['component_dir'])
            for interface_name, interface_info in interfaces_info.iteritems()))

    def generate_code(self, definitions, definition_name):
        """Returns .js/.java code as ((path, content)...)."""
        # Set local type info (scoped to this definitions file, unlike the
        # global registrations done in __init__)
        IdlType.set_callback_functions(definitions.callback_functions.keys())
        IdlType.set_enums((enum.name, enum.values)
                          for enum in definitions.enumerations.values())
        return self.generate_code_internal(definitions, definition_name)

    def generate_code_internal(self, definitions, definition_name):
        # This should be implemented in subclasses.
        raise NotImplementedError()
class CodeGeneratorExt(CodeGeneratorBase):
    """Generates the JS/Java extension bindings (X.js / X.java)."""

    def __init__(self, interfaces_info, cache_dir, output_dir):
        CodeGeneratorBase.__init__(self, interfaces_info, cache_dir, output_dir)

    def output_paths(self, definition_name):
        # One .js and one .java file per definition, in the flat output dir
        js_path = posixpath.join(self.output_dir, '%s.js' % definition_name)
        java_path = posixpath.join(self.output_dir, '%s.java' % definition_name)
        return js_path, java_path

    def generate_code_internal(self, definitions, definition_name):
        # Dispatch on whether the definition is an interface or a dictionary
        if definition_name in definitions.interfaces:
            return self.generate_interface_code(
                definitions, definition_name,
                definitions.interfaces[definition_name])
        if definition_name in definitions.dictionaries:
            return self.generate_dictionary_code(
                definitions, definition_name,
                definitions.dictionaries[definition_name])
        raise ValueError('%s is not in IDL definitions' % definition_name)

    def generate_interface_code(self, definitions, interface_name, interface):
        """Render the JS and Java templates for one interface.

        Returns ((js_path, js_text), (java_path, java_text)).
        """
        # Store other interfaces for introspection
        interfaces.update(definitions.interfaces)

        # Select appropriate Jinja template and contents function
        if interface.is_callback:
            js_template_filename = 'callback_interface.js'
            java_template_filename = 'callback_interface.java'
            interface_context = ext_callback_interface.callback_interface_context
        else:
            js_template_filename = 'interface.js'
            java_template_filename = 'interface.java'
            interface_context = ext_interface.interface_context
        js_template = self.jinja_env.get_template(js_template_filename)
        java_template = self.jinja_env.get_template(java_template_filename)

        interface_info = self.interfaces_info[interface_name]

        template_context = interface_context(interface)
        # Add the include for interface itself
        template_context['header_includes'].add(interface_info['include_path'])
        js_text, java_text = render_template(
            interface_info, js_template, java_template, template_context)
        js_path, java_path = self.output_paths(interface_name)
        return (
            (js_path, js_text),
            (java_path, java_text),
        )

    def generate_dictionary_code(self, definitions, dictionary_name,
                                 dictionary):
        """Render the ext dictionary templates for one dictionary definition."""
        header_template = self.jinja_env.get_template('dictionary_ext.h')
        cpp_template = self.jinja_env.get_template('dictionary_ext.cpp')
        template_context = ext_dictionary.dictionary_context(dictionary)
        interface_info = self.interfaces_info[dictionary_name]
        # Add the include for interface itself
        template_context['header_includes'].add(interface_info['include_path'])
        header_text, cpp_text = render_template(
            interface_info, header_template, cpp_template, template_context)
        header_path, cpp_path = self.output_paths(dictionary_name)
        return (
            (header_path, header_text),
            (cpp_path, cpp_text),
        )
class CodeGeneratorDictionaryImpl(CodeGeneratorBase):
    """Generates the C++ implementation (.h/.cpp) for IDL dictionaries."""

    def __init__(self, interfaces_info, cache_dir, output_dir):
        CodeGeneratorBase.__init__(self, interfaces_info, cache_dir, output_dir)

    def output_paths(self, definition_name, interface_info):
        """Return (header_path, cpp_path) for *definition_name*.

        Definitions in known components keep their relative directory layout;
        anything else goes straight into the flat output directory.
        """
        if interface_info['component_dir'] in KNOWN_COMPONENTS:
            output_dir = posixpath.join(self.output_dir,
                                        interface_info['relative_dir'])
        else:
            output_dir = self.output_dir
        header_path = posixpath.join(output_dir, '%s.h' % definition_name)
        cpp_path = posixpath.join(output_dir, '%s.cpp' % definition_name)
        return header_path, cpp_path

    def generate_code_internal(self, definitions, definition_name):
        """Render the dictionary impl templates for *definition_name*.

        Raises:
            ValueError: if the name is not an IDL dictionary.
        """
        if definition_name not in definitions.dictionaries:
            # BUG FIX: the offending name was never interpolated into the
            # message (the '%s' was logged literally).
            raise ValueError('%s is not an IDL dictionary' % definition_name)
        dictionary = definitions.dictionaries[definition_name]
        interface_info = self.interfaces_info[definition_name]
        header_template = self.jinja_env.get_template('dictionary_impl.h')
        cpp_template = self.jinja_env.get_template('dictionary_impl.cpp')
        template_context = ext_dictionary.dictionary_impl_context(
            dictionary, self.interfaces_info)
        header_text, cpp_text = render_template(
            interface_info, header_template, cpp_template, template_context)
        header_path, cpp_path = self.output_paths(
            definition_name, interface_info)
        return (
            (header_path, header_text),
            (cpp_path, cpp_text),
        )
def initialize_jinja_env(cache_dir):
    """Create the Jinja2 environment used for all template rendering.

    The bytecode cache in *cache_dir* should be pre-populated (see main())
    before running parallel builds: cache writes are not concurrency-safe.
    """
    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(templates_dir),
        # Bytecode cache is not concurrency-safe unless pre-cached:
        # if pre-cached this is read-only, but writing creates a race condition.
        bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir),
        keep_trailing_newline=True,  # newline-terminate generated files
        lstrip_blocks=True,  # so can indent control flow tags
        trim_blocks=True)
    # Filters implementing the IDL extended attributes ([Conditional], etc.)
    jinja_env.filters.update({
        'blink_capitalize': capitalize,
        'conditional': conditional_if_endif,
        'exposed': exposed_if,
        'per_context_enabled': per_context_enabled_if,
        'runtime_enabled': runtime_enabled_if,
    })
    return jinja_env
def generate_indented_conditional(code, conditional):
    """Wrap *code* in an `if (conditional) { ... }` block at its own indent level.

    NOTE(review): the exact spaces inside the string literals below were
    reconstructed from a whitespace-mangled dump -- confirm against upstream.
    """
    # Indent if statement to level of original code
    indent = re.match(' *', code).group(0)
    return ('%sif (%s) {\n' % (indent, conditional) +
            ' %s\n' % '\n '.join(code.splitlines()) +
            '%s}\n' % indent)
# [Conditional]
def conditional_if_endif(code, conditional_string):
    """Jinja2 filter: wrap *code* in #if/#endif guards when a condition is set."""
    if not conditional_string:
        return code
    prologue = '#if %s\n' % conditional_string
    epilogue = '#endif // %s\n' % conditional_string
    return prologue + code + epilogue
# [Exposed]
def exposed_if(code, exposed_test):
    """Jinja2 filter: guard *code* behind a runtime [Exposed] test."""
    if not exposed_test:
        return code
    condition = 'context && (%s)' % exposed_test
    return generate_indented_conditional(code, condition)
# [PerContextEnabled]
def per_context_enabled_if(code, per_context_enabled_function):
    """Jinja2 filter: guard *code* behind a per-context-enabled check."""
    if not per_context_enabled_function:
        return code
    condition = ('context && context->isDocument() && %s(toDocument(context))'
                 % per_context_enabled_function)
    return generate_indented_conditional(code, condition)
# [RuntimeEnabled]
def runtime_enabled_if(code, runtime_enabled_function_name):
    """Jinja2 filter: guard *code* behind a runtime-enabled-feature check."""
    if not runtime_enabled_function_name:
        return code
    return generate_indented_conditional(
        code, '%s()' % runtime_enabled_function_name)
################################################################################


def main(argv):
    """Pre-cache Jinja templates and touch a dummy output file for the build.

    Usage: code_generator_ext.py CACHE_DIR DUMMY_FILENAME
    Returns 1 on bad usage (and prints usage), None on success.
    """
    # If file itself executed, cache templates
    try:
        cache_dir = argv[1]
        dummy_filename = argv[2]
    except IndexError as err:
        print 'Usage: %s CACHE_DIR DUMMY_FILENAME' % argv[0]
        return 1

    # Cache templates
    jinja_env = initialize_jinja_env(cache_dir)
    template_filenames = [filename for filename in os.listdir(templates_dir)
                          # Skip .svn, directories, etc.
                          if filename.endswith(('.cpp', '.h'))]
    for template_filename in template_filenames:
        # get_template compiles the template and writes its bytecode cache
        jinja_env.get_template(template_filename)

    # Create a dummy file as output for the build system,
    # since filenames of individual cache files are unpredictable and opaque
    # (they are hashes of the template path, which varies based on environment)
    with open(dummy_filename, 'w') as dummy_file:
        pass  # |open| creates or touches the file


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
{
"content_hash": "38d357d554f85d44ff548cfb240ab9c7",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 140,
"avg_line_length": 42.95454545454545,
"alnum_prop": 0.6710506424792139,
"repo_name": "hgl888/crosswalk-android-extensions",
"id": "29da8cc9a57dd1c869731042ac14e714a2ec0506",
"size": "14760",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/idl-generator/third_party/WebKit/Source/bindings/scripts/code_generator_ext.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "75866"
},
{
"name": "JavaScript",
"bytes": "15654"
},
{
"name": "Python",
"bytes": "95429"
}
],
"symlink_target": ""
}
|
""" route bundles of port (river routing)
"""
from typing import Callable, List, Optional
from numpy import float64, ndarray
import numpy as np
from pp.routing.connect import connect_strip
from pp.routing.connect import connect_elec_waypoints
from pp.routing.connect import connect_strip_way_points
from pp.routing.manhattan import generate_manhattan_waypoints
from pp.routing.u_groove_bundle import u_bundle_indirect
from pp.routing.u_groove_bundle import u_bundle_direct
from pp.routing.corner_bundle import corner_bundle
from pp.routing.path_length_matching import path_length_matched_points
from pp.name import autoname
from pp.component import ComponentReference, Component
from pp.port import Port
from pp.config import conf
# Minimum spacing between parallel metal tracks (presumably um, the layout
# unit used throughout this module -- confirm against the tech definition)
METAL_MIN_SEPARATION = 10.0
# Default bend radius, taken from the active technology configuration
BEND_RADIUS = conf.tech.bend_radius
def connect_bundle(
    start_ports,
    end_ports,
    route_filter=connect_strip_way_points,
    separation=5.0,
    bend_radius=BEND_RADIUS,
    extension_length=0,
    **kwargs,
):
    """Connects bundle of ports using river routing.

    Chooses the correct u_bundle/corner/direct strategy based on port angles.

    Args:
        start_ports: list or dict of ports, all facing the same direction
        end_ports: list or dict of ports, all facing the same direction
        route_filter: function to connect
        separation: waveguide separation
        bend_radius: for the routes
        extension_length: adds waveguide extension (indirect U-routes only)
    """
    # Accept dict or list
    if isinstance(start_ports, dict):
        start_ports = list(start_ports.values())
    if isinstance(end_ports, dict):
        end_ports = list(end_ports.values())

    nb_ports = len(start_ports)

    # Normalize all angles to [0, 360)
    for p in start_ports:
        p.angle = int(p.angle) % 360
    for p in end_ports:
        p.angle = int(p.angle) % 360

    assert len(end_ports) == nb_ports
    assert (
        len(set([p.angle for p in start_ports])) <= 1
    ), "All start port angles should be the same"
    assert (
        len(set([p.angle for p in end_ports])) <= 1
    ), "All end port angles should be the same"

    # Ensure the correct bend radius is used
    def _route_filter(*args, **kwargs):
        kwargs["bend_radius"] = bend_radius
        return route_filter(*args, **kwargs)

    params = {
        "start_ports": start_ports,
        "end_ports": end_ports,
        "route_filter": _route_filter,
        "separation": separation,
        "bend_radius": bend_radius,
    }

    start_angle = start_ports[0].angle
    end_angle = end_ports[0].angle

    start_axis = "X" if start_angle in [0, 180] else "Y"
    end_axis = "X" if end_angle in [0, 180] else "Y"

    x_start = np.mean([p.x for p in start_ports])
    x_end = np.mean([p.x for p in end_ports])
    y_start = np.mean([p.y for p in start_ports])
    y_end = np.mean([p.y for p in end_ports])

    if start_axis != end_axis:
        # Perpendicular start/end directions: route around the corner
        return corner_bundle(**params, **kwargs)

    # Same-axis cases
    if (
        start_angle == 0
        and end_angle == 180
        and x_start < x_end
        or start_angle == 180
        and end_angle == 0
        and x_start > x_end
        or start_angle == 90
        and end_angle == 270
        and y_start < y_end
        or start_angle == 270
        and end_angle == 90
        and y_start > y_end
    ):
        # Ports face each other: direct river routing
        return link_ports(**params, **kwargs)
    elif start_angle == end_angle:
        return u_bundle_direct(**params, **kwargs)
    elif end_angle == (start_angle + 180) % 360:
        params["extension_length"] = extension_length
        return u_bundle_indirect(**params, **kwargs)
    else:
        # Same-axis angles can only combine in the ways above
        raise NotImplementedError("This should never happen")
    # NOTE: a trailing unconditional `raise NotImplementedError("Routing along
    # different axis not implemented yet")` was removed -- it was unreachable
    # because every branch above returns or raises.
def get_port_x(port: Port) -> float64:
    """Return the x coordinate of *port*'s midpoint (used as a sort key)."""
    midpoint = port.midpoint
    return midpoint[0]
def get_port_y(port):
    """Return the y coordinate of *port*'s midpoint (used as a sort key)."""
    midpoint = port.midpoint
    return midpoint[1]
def get_port_width(port):
    """Return the width attribute of *port*."""
    width = port.width
    return width
def are_decoupled(x1, x1p, x2, x2p, sep=METAL_MIN_SEPARATION):
    """Return True when track pair (x1, x1p) and pair (x2, x2p) cannot interfere.

    Assumes ``sep`` >= 0 (every caller in this module passes a positive
    separation). The original third check ``x2 < x1p - sep`` was removed:
    it was unreachable, since reaching it required x2 >= x1p + sep, which
    for sep >= 0 already implies x2 >= x1p - sep.
    """
    if x2p + sep > x1:
        return False
    if x2 < x1p + sep:
        return False
    return True
def link_ports(
    start_ports: List[Port],
    end_ports: List[Port],
    separation: float = 5.0,
    route_filter: Callable = connect_strip_way_points,
    **routing_params,
) -> List[ComponentReference]:
    """Semi auto-routing for two lists of ports.

    Args:
        start_ports: first list of ports (all facing the same direction)
        end_ports: second list of ports
        separation: minimum separation between two waveguides
        route_filter: filter to apply to the manhattan waypoints
            e.g `connect_strip_way_points` for deep etch strip waveguide
        **routing_params: forwarded to link_ports_routes and route_filter
            (bend_radius, end_straight_offset, sort_ports,
            compute_array_separation_only, ...)

    Returns:
        `[route_filter(r) for r in routes]` where routes is a list of lists
        of coordinates; with the default `connect_strip_way_points`, a list
        of elements which can be added to a component.

    The routing assumes manhattan routing between the different ports.
    The strategy is to modify `start_straight` and `end_straight` for each
    waveguide such that waveguides do not collide.

    .. code::

        1             X    X     X  X X  X
        |-----------|    |     |  | |  |-----------------------|
        |          |-----|     |  | |---------------|          |
        |          |          ||  |------|          |          |
        2 X          X          X X        X          X          X

    start: at the top, end: at the bottom.

    Group tracks which would collide together and adjust each group's
    end_straight lengths so the tracks nest without crossing; this also
    handles different metal track/wg/wire widths.
    """
    # Compute collision-free waypoints first, then materialize the routes
    routes = link_ports_routes(
        start_ports,
        end_ports,
        separation=separation,
        routing_func=generate_manhattan_waypoints,
        **routing_params,
    )

    return [route_filter(route, **routing_params) for route in routes]
def link_ports_routes(
    start_ports: List[Port],
    end_ports: List[Port],
    separation: float,
    bend_radius: float = BEND_RADIUS,
    routing_func: Callable = generate_manhattan_waypoints,
    sort_ports: bool = True,
    end_straight_offset: Optional[float] = None,
    compute_array_separation_only: bool = False,
    verbose: int = 0,
    tol: float = 0.00001,
    **kwargs,
) -> List[ndarray]:
    """Compute manhattan waypoint paths between two arrays of ports.

    routing_func: Function used to connect two ports. Should be like `connect_strip`

    Returns a list of waypoint arrays (one per port pair); or, when
    ``compute_array_separation_only`` is True, a single float giving the
    minimum clearance needed between the two arrays.
    """
    ports1 = start_ports
    ports2 = end_ports
    # Ports facing east/west are compared along Y; north/south along X
    if start_ports[0].angle in [0, 180]:
        axis = "X"
    else:
        axis = "Y"

    if len(ports1) == 0 or len(ports2) == 0:
        print("WARNING! Not linking anything, empty list of ports")
        return []

    # Single pair: no grouping logic needed, route directly
    if len(ports1) == 1 and len(ports2) == 1:
        if end_straight_offset:
            kwargs["end_straight"] = end_straight_offset
        return [
            routing_func(
                ports1[0],
                ports2[0],
                start_straight=0.05,
                bend_radius=bend_radius,
                **kwargs,
            )
        ]

    elems = []
    ## Contains end_straight of tracks which need to be adjusted together
    end_straights_in_group = []
    ## Once a group is finished, all the lengths are appended to end_straights
    end_straights = []

    ## Axis along which we sort the ports
    if axis in ["X", "x"]:
        f_key1 = get_port_y
        f_key2 = get_port_y
    else:
        f_key1 = get_port_x
        f_key2 = get_port_x

    if sort_ports:
        ports1.sort(key=f_key1)
        ports2.sort(key=f_key2)

    ## Keep track of how many ports should be routed together
    ## NOTE(review): this counter is written but never read
    number_o_connectors_in_group = 0

    # x* are the transverse coordinates being compared; y* the longitudinal
    if axis in ["X", "x"]:
        x1_prev = get_port_y(ports1[0])
        x2_prev = get_port_y(ports2[0])
        y0 = get_port_x(ports2[0])
        y1 = get_port_x(ports1[0])
    else:  # axis == "Y": compare X coordinates
        x1_prev = get_port_x(ports1[0])
        x2_prev = get_port_x(ports2[0])
        y0 = get_port_y(ports2[0])
        y1 = get_port_y(ports1[0])

    # +1/-1 depending on which side the end array lies (sign(0) == -1 here)
    s = sign(y0 - y1)
    curr_end_straight = 0

    end_straight_offset = end_straight_offset or 15.0
    Le = end_straight_offset

    # Pairs closer than this cannot be routed with a direct manhattan path
    has_close_x_ports = False
    close_ports_thresh = 2 * bend_radius + 1.0

    ## First pass - loop on all the ports to find the tentative end_straights
    _w = get_port_width
    for i in range(len(ports1)):
        if axis in ["X", "x"]:
            x1 = get_port_y(ports1[i])
            x2 = get_port_y(ports2[i])
            y = get_port_x(ports2[i])
        else:
            x1 = get_port_x(ports1[i])
            x2 = get_port_x(ports2[i])
            y = get_port_y(ports2[i])

        dx = abs(x2 - x1)
        if dx < close_ports_thresh:
            has_close_x_ports = True
        """
        Compute the metal separation to use. This depends on the adjacent
        metal track widths
        """
        if i != len(ports1) - 1 and i != 0:
            # Deal with any track which is not on the edge
            max_width = max(_w(ports1[i + 1]), _w(ports1[i - 1]))
            curr_sep = 0.5 * (_w(ports1[i]) + max_width) + separation
        elif i == 0:
            # Deal with start edge case
            curr_sep = separation + 0.5 * (_w(ports1[0]) + _w(ports1[1]))
        elif i == len(ports1) - 1:
            # Deal with end edge case
            curr_sep = separation + 0.5 * (_w(ports1[-2]) + _w(ports1[-1]))

        if are_decoupled(x2, x2_prev, x1, x1_prev, sep=curr_sep):
            """
            If this metal track does not impact the previous one, then
            start a new group.
            """
            L = min(end_straights_in_group)
            end_straights += [max(x - L, 0) + Le for x in end_straights_in_group]

            # Start new group
            end_straights_in_group = []
            curr_end_straight = 0
            number_o_connectors_in_group = 0

        else:
            # Nest within the current group: push the straight out or in
            if x2 >= x1:
                curr_end_straight += curr_sep
            else:
                curr_end_straight -= curr_sep

        end_straights_in_group.append(curr_end_straight + (y - y0) * s)
        number_o_connectors_in_group += 1

        x1_prev = x1
        x2_prev = x2

    # Append the last group
    L = min(end_straights_in_group)
    end_straights += [max(x - L, 0) + Le for x in end_straights_in_group]

    if compute_array_separation_only:
        # If there is no port too close to each other in x, then there are
        # only two bends per route
        if not has_close_x_ports:
            return max(end_straights) + 2 * bend_radius
        else:
            return max(end_straights) + 4 * bend_radius

    ## Second pass - route the ports pairwise
    N = len(ports1)
    for i in range(N):
        if axis in ["X", "x"]:
            x1 = get_port_y(ports1[i])
            x2 = get_port_y(ports2[i])
        else:
            x1 = get_port_x(ports1[i])
            x2 = get_port_x(ports2[i])

        dx = abs(x2 - x1)

        # If both ports are aligned, we just need a straight line
        if dx < tol:
            elems += [
                routing_func(
                    ports1[i],
                    ports2[i],
                    start_straight=0,
                    end_straight=end_straights[i],
                    bend_radius=bend_radius,
                    **kwargs,
                )
            ]  #

        # Annoying case where it is too tight for direct manhattan routing:
        # step the start port sideways first, then route from the temp port.
        elif dx < close_ports_thresh:
            a = close_ports_thresh + abs(dx)
            prt = ports1[i]
            angle = prt.angle
            # sideways step vectors (west/east in the transverse direction)
            dp_w = (0, -a) if axis in ["X", "x"] else (-a, 0)
            dp_e = (0, a) if axis in ["X", "x"] else (a, 0)

            do_step_aside = False
            if i == 0:
                ## If westest port, then we can safely step on the west further
                ## First check whether we have to step
                dx2 = ports1[i + 1].x - prt.x
                req_x = 2 * bend_radius + ports2[i].x - ports1[i].x
                if dx2 < req_x:
                    do_step_aside = True
                    dp = dp_w
            elif i == N - 1:
                ## If eastest port, then we can safely step on the east further
                ## First check whether we have to step
                dx2 = prt.x - ports1[i - 1].x
                req_x = 2 * bend_radius + ports1[i].x - ports2[i].x
                if dx2 < req_x:
                    do_step_aside = True
                    dp = dp_e
            else:
                ## Otherwise find closest port and escape where/if space permit
                dx1 = prt.x - ports1[i - 1].x
                dx2 = ports1[i + 1].x - prt.x
                do_step_aside = True
                if dx2 > dx1:
                    dp = dp_e
                else:
                    dp = dp_w

                ## If there is not enough space to step away, put a warning.
                ## This requires inspection on the mask. Raising an error
                ## would likely make it harder to debug. Here we will see
                ## a DRC error or an unwanted crossing on the mask.
                if max(dx1, dx2) < a:
                    print(
                        "WARNING - high risk of collision in routing. \
Ports too close to each other."
                    )

            _route = []
            if do_step_aside:
                # escape sideways via a temporary port, then route from it
                tmp_port = prt.move_polar_copy(2 * bend_radius + 1.0, angle)
                tmp_port.move(dp)
                _route += [
                    routing_func(
                        prt, tmp_port.flip(), bend_radius=bend_radius, **kwargs
                    )
                ]
            else:
                tmp_port = prt
            if verbose > 2:
                print(
                    "STEPPING",
                    ports1[i].position,
                    tmp_port.position,
                    ports2[i].position,
                )
            _route += [
                routing_func(
                    tmp_port,
                    ports2[i],
                    start_straight=0.05,
                    end_straight=end_straights[i],
                    bend_radius=bend_radius,
                    **kwargs,
                )
            ]
            elems += _route

        # Usual case
        else:
            elems += [
                routing_func(
                    ports1[i],
                    ports2[i],
                    start_straight=0.05,
                    end_straight=end_straights[i],
                    bend_radius=bend_radius,
                    **kwargs,
                )
            ]
    return elems
def generate_waypoints_connect_bundle(*args, **kwargs):
    """
    returns a list of waypoints for each path generated with link_ports
    """
    # An identity route_filter makes connect_bundle return bare waypoints
    identity = lambda waypoints, **params: waypoints
    return connect_bundle(*args, route_filter=identity, **kwargs)
def compute_ports_max_displacement(start_ports, end_ports):
    """Largest transverse offset between the two port arrays.

    East/west-facing ports are compared on y, north/south-facing on x.
    """
    if start_ports[0].angle in [0, 180]:
        coords1 = [p.y for p in start_ports]
        coords2 = [p.y for p in end_ports]
    else:
        coords1 = [p.x for p in start_ports]
        coords2 = [p.x for p in end_ports]

    spread_hi = abs(max(coords1) - min(coords2))
    spread_lo = abs(min(coords1) - max(coords2))
    return max(spread_hi, spread_lo)
def connect_bundle_path_length_match(
    ports1,
    ports2,
    separation=30.0,
    end_straight_offset=None,
    bend_radius=BEND_RADIUS,
    dL0=0,
    nb_loops=1,
    modify_segment_i=-2,
    route_filter=connect_strip_way_points,
    **kwargs,
):
    """Route a bundle and post-process the waypoints to equalize path lengths.

    Args:
        ports1: first list of ports
        ports2: second list of ports
        separation: waveguide separation (needs room for the matching loops)
        end_straight_offset: forced end straight; auto-computed when None
        bend_radius: for the routes
        dL0: extra target length added to every route
        nb_loops: number of path-length-matching loops to insert
        modify_segment_i: index of the segment that absorbs the extra length
        route_filter: function turning waypoints into route elements
        **kwargs: extra arguments for inner call to generate_waypoints_connect_bundle

    Returns:
        [route_filter(l) for l in list_of_waypoints]
    """
    kwargs["separation"] = separation

    # Heuristic to get a correct default end_straight_offset to leave
    # enough space for path-length compensation
    if end_straight_offset is None:
        if modify_segment_i == -2:
            end_straight_offset = (
                compute_ports_max_displacement(ports1, ports2) / (2 * nb_loops)
                + separation
                + dL0
            )
        else:
            end_straight_offset = 0

    kwargs["end_straight_offset"] = end_straight_offset
    kwargs["bend_radius"] = bend_radius
    # First generate raw waypoints, then stretch them to matched lengths
    list_of_waypoints = generate_waypoints_connect_bundle(ports1, ports2, **kwargs)
    list_of_waypoints = path_length_matched_points(
        list_of_waypoints,
        dL0=dL0,
        bend_radius=bend_radius,
        nb_loops=nb_loops,
        modify_segment_i=modify_segment_i,
    )
    return [route_filter(waypoints) for waypoints in list_of_waypoints]
def link_electrical_ports(
    ports1: List[Port],
    ports2: List[Port],
    separation: float = METAL_MIN_SEPARATION,
    bend_radius: float = 0.0001,
    link_dummy_ports=False,
    route_filter: Callable = connect_elec_waypoints,
    **kwargs,
) -> List[ComponentReference]:
    """Connect a bundle of electrical ports with manhattan metal routes.

    Unless ``link_dummy_ports`` is set, any pair where either port is
    flagged as a dummy (``port.is_dummy``) is dropped before routing.

    Args:
        ports1: first list of ports
        ports2: second list of ports
        separation: minimum separation between two tracks
        bend_radius: near-zero default since metal corners need no bends
        link_dummy_ports: route dummy ports too instead of skipping them
        route_filter: filter applied to the manhattan waypoints
        **kwargs: forwarded to link_ports

    Returns:
        list of references of the electrical routes
    """
    if link_dummy_ports:
        filtered1, filtered2 = ports1, ports2
    else:

        def _is_dummy(port):
            return getattr(port, "is_dummy", False)

        # Keep only pairs where neither side is a dummy port
        pairs = [
            (p1, p2)
            for p1, p2 in zip(ports1, ports2)
            if not _is_dummy(p1) and not _is_dummy(p2)
        ]
        filtered1 = [p1 for p1, _ in pairs]
        filtered2 = [p2 for _, p2 in pairs]

    return link_ports(
        filtered1,
        filtered2,
        separation,
        bend_radius=bend_radius,
        route_filter=route_filter,
        **kwargs,
    )
def link_optical_ports(
    ports1: List[Port],
    ports2: List[Port],
    separation: float = 5.0,
    route_filter: Callable = connect_strip_way_points,
    bend_radius: float = BEND_RADIUS,
    **kwargs,
) -> List[ComponentReference]:
    """Route a bundle of optical ports; thin convenience wrapper of link_ports."""
    return link_ports(
        ports1,
        ports2,
        separation,
        route_filter=route_filter,
        bend_radius=bend_radius,
        **kwargs,
    )
def sign(x):
    """Return +1 when ``x`` is strictly positive, otherwise -1 (including x == 0)."""
    return 1 if x > 0 else -1
def get_min_spacing(
    ports1: List[Port],
    ports2: List[Port],
    sep: float = 5.0,
    sort_ports: bool = True,
    radius: float = BEND_RADIUS,
) -> float:
    """Return the minimum spacing required to create a fanout between two port rows.

    Mirrors the straight-length offset bookkeeping of
    ``link_optical_ports_no_grouping``: walk the paired ports, accumulate an
    offset index ``j`` that grows/shrinks depending on which side each pair
    crosses, and budget one ``sep`` per offset level plus two bend radii.

    Args:
        ports1: first list of ports.
        ports2: second list of ports, paired with ``ports1``.
        sep: separation between adjacent routes.
        sort_ports: sort both lists along the routing axis before pairing.
        radius: bend radius budgeted on each side of the fanout.

    Returns:
        Minimum spacing ``(max_j - min_j) * sep + 2 * radius + 1.0``.
    """
    # Routing axis follows the orientation of the first port.
    if ports1[0].angle in [0, 180]:
        axis = "X"
    else:
        axis = "Y"
    if sort_ports:
        if axis in ["X", "x"]:
            ports1.sort(key=get_port_y)
            ports2.sort(key=get_port_y)
        else:
            ports1.sort(key=get_port_x)
            ports2.sort(key=get_port_x)
    j = 0
    min_j = 0
    max_j = 0
    for i in range(len(ports1)):
        if axis in ["X", "x"]:
            x1 = get_port_y(ports1[i])
            x2 = get_port_y(ports2[i])
        else:
            x1 = get_port_x(ports1[i])
            x2 = get_port_x(ports2[i])
        if x2 >= x1:
            j += 1
        else:
            j -= 1
        # BUG FIX: the offset index must accumulate across the whole bundle.
        # The previous version reset ``j = 0`` inside this loop, which capped
        # min_j/max_j at +/-1 and made the result independent of the fanout
        # size; the sibling ``link_optical_ports_no_grouping`` resets ``j``
        # only after the loop.
        if j < min_j:
            min_j = j
        if j > max_j:
            max_j = j
    return (max_j - min_j) * sep + 2 * radius + 1.0
def link_optical_ports_no_grouping(
    ports1,
    ports2,
    sep=5.0,
    routing_func=connect_strip,
    radius=BEND_RADIUS,
    start_straight=None,
    end_straight=None,
    sort_ports=True,
):
    """Route each port in ``ports1`` to its counterpart in ``ports2`` without grouping.

    Compared to ``link_ports``, this function does no grouping. It is not as
    smart about the routing, but it can fall back on an arc-line-arc
    connection if needed, and longer ``start_straight`` / ``end_straight``
    values can be specified.

    The routing assumes manhattan paths between the paired ports. The strategy
    is to give every route its own ``start_straight`` / ``end_straight``
    offset (in steps of ``sep``) so that routes do not collide: when a route
    has to cross leftwards (x2 < x1) its start straight shrinks and its end
    straight grows, and vice versa for rightward crossings.

    Args:
        ports1: first list of optical ports (route starts).
        ports2: second list of optical ports (route ends), paired by index.
        sep: separation between adjacent parallel routes.
        routing_func: connector function taking (port1, port2,
            start_straight=..., end_straight=..., [bend_radius=...]).
        radius: bend radius; if None the connector is called without a
            bend_radius argument (uses the connector's own default).
        start_straight: extra straight length before the first bend.
        end_straight: extra straight length after the last bend.
        sort_ports: True -> sort both lists along the routing axis.

    Returns:
        A list of elements containing the connecting waveguides.
    """
    # The routing axis is inferred from the orientation of the first port.
    if ports1[0].angle in [0, 180]:
        axis = "X"
    else:
        axis = "Y"
    elems = []
    j = 0
    # min and max offsets needed for avoiding collisions between waveguides
    min_j = 0
    max_j = 0
    if sort_ports:
        # Sort ports according to X or Y
        if axis in ["X", "x"]:
            ports1.sort(key=get_port_y)
            ports2.sort(key=get_port_y)
        else:
            ports1.sort(key=get_port_x)
            ports2.sort(key=get_port_x)
    # First pass: compute max_j and min_j (the extreme offset levels needed),
    # without creating any routes yet.
    for i in range(len(ports1)):
        if axis in ["X", "x"]:
            x1 = ports1[i].position.y
            x2 = ports2[i].position.y
        else:
            x1 = ports1[i].position.x
            x2 = ports2[i].position.x
        if x2 >= x1:
            j += 1
        else:
            j -= 1
        if j < min_j:
            min_j = j
        if j > max_j:
            max_j = j
    # Reset the offset index for the second (route-creating) pass.
    j = 0
    if start_straight is None:
        start_straight = 0.2
    if end_straight is None:
        end_straight = 0.2
    # Budget enough straight length on both sides so that even the route at
    # the extreme offset level stays clear of its neighbours.
    start_straight += max_j * sep
    end_straight += -min_j * sep
    # Second pass: create one route per port pair with its own offsets.
    for i in range(len(ports1)):
        if axis in ["X", "x"]:
            x1 = ports1[i].position.y
            x2 = ports2[i].position.y
        else:
            x1 = ports1[i].position.x
            x2 = ports2[i].position.x
        s_straight = start_straight - j * sep
        e_straight = j * sep + end_straight
        if radius is None:
            # Let the connector use its own default bend radius.
            elems += [
                routing_func(
                    ports1[i],
                    ports2[i],
                    start_straight=s_straight,
                    end_straight=e_straight,
                )
            ]
        else:
            elems += [
                routing_func(
                    ports1[i],
                    ports2[i],
                    start_straight=s_straight,
                    end_straight=e_straight,
                    bend_radius=radius,
                )
            ]
        if x2 >= x1:
            j += 1
        else:
            j -= 1
    return elems
@autoname
def test_connect_bundle():
    """Demo cell: fan out an irregular top row onto a regularly pitched bottom row."""
    xs_top = [-100, -90, -80, 0, 10, 20, 40, 50, 80, 90, 100, 105, 110, 115]
    pitch = 127.0
    count = len(xs_top)
    xs_bottom = [(k - count / 2) * pitch for k in range(count)]

    top_ports = []
    bottom_ports = []
    for k in range(count):
        top_ports.append(Port("top_{}".format(k), (xs_top[k], 0), 0.5, 270))
        bottom_ports.append(Port("bottom_{}".format(k), (xs_bottom[k], -400), 0.5, 90))

    top_cell = Component(name="connect_bundle")
    for route in connect_bundle(top_ports, bottom_ports):
        top_cell.add(route)
    top_cell.name = "connect_bundle"
    return top_cell
@autoname
def test_connect_corner(N=6, config="A"):
    """Demo cell: route bundles around the four corners of a square.

    Args:
        N: number of ports per corner group.
        config: "A"/"B" -> A-ports sit on a small inner cross, B-ports on the
            outer square of side ``a=100``; "C"/"D" -> the roles are swapped
            with ``a`` sized to fit all N ports. "B" and "D" reverse the
            bundle direction (ports2 -> ports1). Any other value raises a
            NameError because ports_A/ports_B are never assigned.
    """
    d = 10.0
    sep = 5.0
    top_cell = Component(name="connect_corner")
    if config in ["A", "B"]:
        a = 100.0
        # Four groups of A ports, one per quadrant, facing outward along X.
        ports_A_TR = [
            Port("A_TR_{}".format(i), (d, a / 2 + i * sep), 0.5, 0) for i in range(N)
        ]
        ports_A_TL = [
            Port("A_TL_{}".format(i), (-d, a / 2 + i * sep), 0.5, 180) for i in range(N)
        ]
        ports_A_BR = [
            Port("A_BR_{}".format(i), (d, -a / 2 - i * sep), 0.5, 0) for i in range(N)
        ]
        ports_A_BL = [
            Port("A_BL_{}".format(i), (-d, -a / 2 - i * sep), 0.5, 180)
            for i in range(N)
        ]
        ports_A = [ports_A_TR, ports_A_TL, ports_A_BR, ports_A_BL]
        # Matching B ports facing along Y, paired quadrant by quadrant.
        ports_B_TR = [
            Port("B_TR_{}".format(i), (a / 2 + i * sep, d), 0.5, 90) for i in range(N)
        ]
        ports_B_TL = [
            Port("B_TL_{}".format(i), (-a / 2 - i * sep, d), 0.5, 90) for i in range(N)
        ]
        ports_B_BR = [
            Port("B_BR_{}".format(i), (a / 2 + i * sep, -d), 0.5, 270) for i in range(N)
        ]
        ports_B_BL = [
            Port("B_BL_{}".format(i), (-a / 2 - i * sep, -d), 0.5, 270)
            for i in range(N)
        ]
        ports_B = [ports_B_TR, ports_B_TL, ports_B_BR, ports_B_BL]
    elif config in ["C", "D"]:
        # Square sized so all N ports fit between the corner offsets.
        a = N * sep + 2 * d
        ports_A_TR = [
            Port("A_TR_{}".format(i), (a, d + i * sep), 0.5, 0) for i in range(N)
        ]
        ports_A_TL = [
            Port("A_TL_{}".format(i), (-a, d + i * sep), 0.5, 180) for i in range(N)
        ]
        ports_A_BR = [
            Port("A_BR_{}".format(i), (a, -d - i * sep), 0.5, 0) for i in range(N)
        ]
        ports_A_BL = [
            Port("A_BL_{}".format(i), (-a, -d - i * sep), 0.5, 180) for i in range(N)
        ]
        ports_A = [ports_A_TR, ports_A_TL, ports_A_BR, ports_A_BL]
        ports_B_TR = [
            Port("B_TR_{}".format(i), (d + i * sep, a), 0.5, 90) for i in range(N)
        ]
        ports_B_TL = [
            Port("B_TL_{}".format(i), (-d - i * sep, a), 0.5, 90) for i in range(N)
        ]
        ports_B_BR = [
            Port("B_BR_{}".format(i), (d + i * sep, -a), 0.5, 270) for i in range(N)
        ]
        ports_B_BL = [
            Port("B_BL_{}".format(i), (-d - i * sep, -a), 0.5, 270) for i in range(N)
        ]
        ports_B = [ports_B_TR, ports_B_TL, ports_B_BR, ports_B_BL]
    # "A"/"C" route A -> B; "B"/"D" route the opposite direction.
    if config in ["A", "C"]:
        for ports1, ports2 in zip(ports_A, ports_B):
            elements = connect_bundle(ports1, ports2)
            top_cell.add(elements)
    elif config in ["B", "D"]:
        for ports1, ports2 in zip(ports_A, ports_B):
            elements = connect_bundle(ports2, ports1)
            top_cell.add(elements)
    return top_cell
@autoname
def test_connect_bundle_udirect(dy=200, angle=270):
    """Demo cell: U-shaped bundle where both port rows face the same direction."""
    xs1 = [-100, -90, -80, -55, -35, 24, 0] + [200, 210, 240]
    axis = "X" if angle in [0, 180] else "Y"
    pitch = 10.0
    count = len(xs1)
    xs2 = [50 + k * pitch for k in range(count)]

    ports1 = []
    ports2 = []
    for k in range(count):
        if axis == "X":
            ports1.append(Port("top_{}".format(k), (0, xs1[k]), 0.5, angle))
            ports2.append(Port("bottom_{}".format(k), (dy, xs2[k]), 0.5, angle))
        else:
            ports1.append(Port("top_{}".format(k), (xs1[k], 0), 0.5, angle))
            ports2.append(Port("bottom_{}".format(k), (xs2[k], dy), 0.5, angle))

    top_cell = Component(name="connect_bundle_udirect")
    for route in connect_bundle(ports1, ports2):
        top_cell.add(route)
    return top_cell
@autoname
def test_connect_bundle_u_indirect(dy=-200, angle=180):
    """Demo cell: bundle between two port rows facing opposite directions."""
    xs1 = [-100, -90, -80, -55, -35] + [200, 210, 240]
    axis = "X" if angle in [0, 180] else "Y"
    pitch = 10.0
    count = len(xs1)
    xs2 = [50 + k * pitch for k in range(count)]
    a1 = angle
    a2 = a1 + 180

    ports1 = []
    ports2 = []
    for k in range(count):
        if axis == "X":
            ports1.append(Port("top_{}".format(k), (0, xs1[k]), 0.5, a1))
            ports2.append(Port("bottom_{}".format(k), (dy, xs2[k]), 0.5, a2))
        else:
            ports1.append(Port("top_{}".format(k), (xs1[k], 0), 0.5, a1))
            ports2.append(Port("bottom_{}".format(k), (xs2[k], dy), 0.5, a2))

    top_cell = Component("connect_bundle_u_indirect")
    for route in connect_bundle(ports1, ports2):
        top_cell.add(route)
    return top_cell
@autoname
def test_facing_ports():
    """Demo cell: two facing rows whose bottom ports split into two sub-groups."""
    dy = 200.0
    xs1 = [-500, -300, -100, -90, -80, -55, -35, 200, 210, 240, 500, 650]
    pitch = 10.0
    count = len(xs1)
    half = count // 2
    # Top-row targets: one group near the origin, one shifted to x=400.
    xs2 = [-20 + k * pitch for k in range(half)]
    xs2 += [400 + k * pitch for k in range(half)]
    a1 = 90
    a2 = a1 + 180

    ports1 = [Port("top_{}".format(k), (xs1[k], 0), 0.5, a1) for k in range(count)]
    ports2 = [Port("bottom_{}".format(k), (xs2[k], dy), 0.5, a2) for k in range(count)]

    top_cell = Component("test_facing_ports")
    top_cell.add(connect_bundle(ports1, ports2))
    return top_cell
def demo_connect_bundle():
    """Combine all the connect_bundle tests into a single showcase component."""
    row_pitch = 400.0
    col_pitch = 500
    udirect_row_offset = 900
    dy = 200.0
    c = Component("connect_bundle")

    # Grid of U-bundle demos: rows vary the sign of dy, columns the angle.
    for row, direction in enumerate([-1, 1]):
        for col, angle in enumerate([0, 90, 180, 270]):
            indirect = test_connect_bundle_u_indirect(dy=direction * dy, angle=angle)
            c.add(indirect.ref(position=(col * col_pitch, row * row_pitch)))
            udirect = test_connect_bundle_udirect(dy=direction * dy, angle=angle)
            c.add(
                udirect.ref(
                    position=(col * col_pitch, row * row_pitch + udirect_row_offset)
                )
            )

    # One corner demo per configuration, laid out on a single row.
    for col, config in enumerate(["A", "B", "C", "D"]):
        corner = test_connect_corner(config=config)
        c.add(corner.ref(position=(col * col_pitch, 1700)))

    c.add(test_facing_ports().ref(position=(800, 1820)))
    return c
def demo_connect_bundle_small(bend_radius=5):
    """Small demo: route between the two east ports of a single MMI.

    Args:
        bend_radius: bend radius used for the route.

    Returns:
        The mmi1x2 component with the route added.
    """
    import pp

    c = pp.c.mmi1x2()
    # Bug fix: forward the caller's bend_radius instead of the hard-coded 5,
    # which silently ignored the parameter.
    elements = connect_bundle(
        [c.ports["E0"]], [c.ports["E1"]], bend_radius=bend_radius
    )
    c.add(elements)
    return c
if __name__ == "__main__":
    # Manual visual check: build the full demo layout and display it.
    import pp
    c = demo_connect_bundle()
    # c = demo_connect_bundle_small()
    pp.show(c)
|
{
"content_hash": "5727e5b942934bf148263c2945319d5d",
"timestamp": "",
"source": "github",
"line_count": 1095,
"max_line_length": 219,
"avg_line_length": 29.804566210045664,
"alnum_prop": 0.5294460105405074,
"repo_name": "psiq/gdsfactory",
"id": "9e5dc184d3970cc75ee6c9997c93168580f8a125",
"size": "32636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pp/routing/connect_bundle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "111940"
},
{
"name": "Makefile",
"bytes": "3712"
},
{
"name": "Python",
"bytes": "900889"
},
{
"name": "Shell",
"bytes": "293"
},
{
"name": "XS",
"bytes": "10068"
}
],
"symlink_target": ""
}
|
from anki.hooks import wrap
from aqt.editor import Editor, EditorWebView
import os
import sys
from BeautifulSoup import BeautifulSoup
from PyQt4.QtGui import *
from PyQt4.QtCore import *
# CSS property names stripped from the 'style' attribute of pasted HTML
# before it reaches the Anki editor.
REMOVE_ATTRIBUTES = [
    'color',
    'background-color',
]
def purgeAttributes(self, mime, _old):
    """Strip unwanted CSS declarations from pasted HTML, then delegate.

    Wraps ``EditorWebView._processHtml`` (via anki.hooks.wrap, mode
    'around'): parses the clipboard HTML with BeautifulSoup, removes every
    style declaration whose property name is in REMOVE_ATTRIBUTES, and calls
    the original ``_processHtml`` with a fresh QMimeData carrying the
    cleaned HTML. Python 2 / BeautifulSoup 3 code (note the u'' literal and
    str().decode('utf8')).
    """
    html = mime.html()
    soup = BeautifulSoup(html)
    newMime = QMimeData()
    for tag in soup.recursiveChildGenerator():
        # remove attributes in the list
        # BeautifulSoup 3 exposes attrs as a list of (key, value) pairs,
        # so the matching entry is replaced by index.
        index = -1
        try:
            for key, value in tag.attrs:
                index += 1
                if key != 'style':
                    continue
                # Keep only the declarations whose property name is allowed.
                new = value.split(';')
                new = ';'.join([s for s in new
                    if s.split(':')[0].strip() not in REMOVE_ATTRIBUTES])
                tag.attrs[index] = (u'style', new)
        except AttributeError:
            # 'NavigableString' object has no attribute 'attrs'
            pass
    # assign the modified html to new Mime
    newMime.setHtml(str(soup).decode('utf8'))
    # default _processHtml method
    return _old(self, newMime)
# Install the hook: run purgeAttributes around the stock _processHtml.
EditorWebView._processHtml = wrap(EditorWebView._processHtml, purgeAttributes, 'around')
|
{
"content_hash": "b7747ca44ac87a4d9602c4c495126128",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 88,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.5933333333333334,
"repo_name": "searene/Anki-Addons",
"id": "0c02c7ee20cd0a934dda946f1274ea8cee26d1ab",
"size": "1247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PurgeAttributes/2.0/PurgeAttributes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75206"
},
{
"name": "Shell",
"bytes": "741"
}
],
"symlink_target": ""
}
|
from readtagger.bam_io import BamAlignmentReader as Reader
from readtagger.tagcluster import TagCluster
from readtagger.tagcluster import TargetSiteDuplication
# Name of the tagged BAM fixture inside the per-test data directory.
INPUT = 'tagged_dm6.bam'
def test_tsd(datadir_copy):  # noqa: D103
    cluster = get_cluster(datadir_copy)

    # Without duplicates every supporting read gets assigned.
    tsd_no_dups = TargetSiteDuplication(cluster)
    assert len(tsd_no_dups.unassigned_support) == 0
    assert tsd_no_dups.is_valid

    # Including duplicates leaves exactly one read unassigned, still valid.
    tsd_with_dups = TargetSiteDuplication(cluster, include_duplicates=True)
    assert len(tsd_with_dups.unassigned_support) == 1
    assert tsd_with_dups.is_valid

    # Stripping the AD tag from every read must invalidate the TSD call.
    for read in cluster:
        read.set_tag('AD', None)
    tsd_stripped = TargetSiteDuplication(cluster, include_duplicates=True)
    assert not tsd_stripped.is_valid
def test_tagcluster_with_splits(datadir_copy):  # noqa: D103
    cluster = get_cluster(datadir_copy)
    tag_cluster = TagCluster(cluster)
    tsd = tag_cluster.tsd
    # Expected split-read support counts for the tagged fixture.
    assert len(tsd.three_p_support) == 4
    assert len(tsd.five_p_support) == 1
    assert len(tsd.unassigned_support) == 0
def get_cluster(datadir_copy):
    """Read every alignment (= the whole read cluster) from the test input."""
    path = str(datadir_copy[INPUT])
    with Reader(path) as reader:
        return list(reader)
|
{
"content_hash": "86081da4ecdead0a1aac8182865fa613",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 65,
"avg_line_length": 34.0625,
"alnum_prop": 0.7146788990825688,
"repo_name": "bardin-lab/readtagger",
"id": "ac93077fa4a74d38390ff1dc53f21ebd1316d277",
"size": "1090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tagcluster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "334699"
},
{
"name": "Shell",
"bytes": "1137"
}
],
"symlink_target": ""
}
|
"""
Translation support for admin forms.
*django-parler* provides the following classes:
* Model support: :class:`TranslatableAdmin`.
* Inline support: :class:`TranslatableInlineModelAdmin`, :class:`TranslatableStackedInline`, :class:`TranslatableTabularInline`.
* Utilities: :class:`SortedRelatedFieldListFilter`.
Admin classes can be created as expected:
.. code-block:: python
from django.contrib import admin
from parler.admin import TranslatableAdmin
from myapp.models import Project
class ProjectAdmin(TranslatableAdmin):
list_display = ('title', 'status')
fieldsets = (
(None, {
'fields': ('title', 'status'),
}),
)
admin.site.register(Project, ProjectAdmin)
All translated fields can be used in the :attr:`~django.contrib.admin.ModelAdmin.list_display`
and :attr:`~django.contrib.admin.ModelAdmin.fieldsets` like normal fields.
While almost every admin feature just works, there are a few special cases to take care of:
* The :attr:`~django.contrib.admin.ModelAdmin.search_fields` needs the actual ORM fields.
* The :attr:`~django.contrib.admin.ModelAdmin.prepopulated_fields` needs to be replaced with a call
to :func:`~django.contrib.admin.ModelAdmin.get_prepopulated_fields`.
See the :ref:`admin compatibility page <admin-compat>` for details.
"""
from __future__ import unicode_literals
import django
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib.admin.options import csrf_protect_m, BaseModelAdmin, InlineModelAdmin
from django.contrib.admin.util import get_deleted_objects, unquote
from django.core.exceptions import PermissionDenied, ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import router
from django.forms import Media
from django.http import HttpResponseRedirect, Http404, HttpRequest
from django.shortcuts import render
from django.utils.encoding import iri_to_uri, force_text
from django.utils.functional import lazy
from django.utils.html import conditional_escape, escape
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _, get_language
from django.utils import six
from parler import appsettings
from parler.forms import TranslatableModelForm, TranslatableBaseInlineFormSet
from parler.managers import TranslatableQuerySet
from parler.models import TranslatableModel
from parler.utils.compat import transaction_atomic, add_preserved_filters
from parler.utils.i18n import get_language_title, is_multilingual_project
from parler.utils.views import get_language_parameter, get_language_tabs
from parler.utils.template import select_template_name
# Code partially taken from django-hvad
# which is (c) 2011, Jonas Obrist, BSD licensed
__all__ = (
'BaseTranslatableAdmin',
'TranslatableAdmin',
'TranslatableInlineModelAdmin',
'TranslatableStackedInline',
'TranslatableTabularInline',
'SortedRelatedFieldListFilter',
)
# Extra admin assets: the tab styling is always added; the prepopulate
# scripts are only added when get_prepopulated_fields() returns fields.
_language_media = Media(css={
    'all': ('parler/admin/parler_admin.css',)
})
_language_prepopulated_media = _language_media + Media(js=(
    'admin/js/urlify.js',
    'admin/js/prepopulate.min.js'
))
# Dummy request used to probe get_prepopulated_fields() when building media.
_fakeRequest = HttpRequest()
class BaseTranslatableAdmin(BaseModelAdmin):
    """
    The shared code between the regular model admin and inline classes.

    Provides language detection (from the request or the object), queryset
    language selection, and the language tab data used by the templates.
    Falls back to no-op behavior when the model is not a TranslatableModel.
    """
    #: The form to use for the model.
    form = TranslatableModelForm
    #: The URL parameter for the language value.
    query_language_key = 'language'
    @property
    def media(self):
        # Currently, `prepopulated_fields` can't be used because it breaks the admin validation.
        # TODO: as a fix TranslatedFields should become a RelatedField on the shared model (may also support ORM queries)
        # As workaround, declare the fields in get_prepopulated_fields() and we'll provide the admin media automatically.
        # NOTE: probed with the module-level _fakeRequest since no real
        # request is available at media-construction time.
        has_prepoplated = len(self.get_prepopulated_fields(_fakeRequest))
        base_media = super(BaseTranslatableAdmin, self).media
        if has_prepoplated:
            return base_media + _language_prepopulated_media
        else:
            return base_media + _language_media
    def _has_translatable_model(self):
        # Allow fallback to regular models when needed.
        return issubclass(self.model, TranslatableModel)
    def _language(self, request, obj=None):
        """
        Get the language parameter from the current request.
        """
        return get_language_parameter(request, self.query_language_key)
    def get_form_language(self, request, obj=None):
        """
        Return the current language for the currently displayed object fields.

        Prefers the language already set on the object; otherwise reads the
        ``?language=`` request parameter.
        """
        if obj is not None:
            return obj.get_current_language()
        else:
            return self._language(request)
    def get_queryset_language(self, request):
        """
        Return the language to use in the queryset.
        """
        if not is_multilingual_project():
            # Make sure the current translations remain visible, not the dynamically set get_language() value.
            return appsettings.PARLER_LANGUAGES.get_default_language()
        else:
            # Allow to adjust to current language
            # This is overwritten for the inlines, which follow the primary object.
            return get_language()
    def get_queryset(self, request):
        """
        Make sure the current language is selected.
        """
        # Django renamed queryset() to get_queryset() in 1.6.
        if django.VERSION >= (1, 6):
            qs = super(BaseTranslatableAdmin, self).get_queryset(request)
        else:
            qs = super(BaseTranslatableAdmin, self).queryset(request)
        if self._has_translatable_model():
            if not isinstance(qs, TranslatableQuerySet):
                raise ImproperlyConfigured("{0} class does not inherit from TranslatableQuerySet".format(qs.__class__.__name__))
            # Apply a consistent language to all objects.
            qs_language = self.get_queryset_language(request)
            if qs_language:
                qs = qs.language(qs_language)
        return qs
    # For Django 1.5
    queryset = get_queryset
    def get_language_tabs(self, request, obj, available_languages, css_class=None):
        """
        Determine the language tabs to show.
        """
        current_language = self.get_form_language(request, obj)
        return get_language_tabs(request, current_language, available_languages, css_class=css_class)
class TranslatableAdmin(BaseTranslatableAdmin, admin.ModelAdmin):
"""
Base class for translated admins.
This class also works as regular admin for non TranslatableModel objects.
When using this class with a non-TranslatableModel,
all operations effectively become a NO-OP.
"""
deletion_not_allowed_template = 'admin/parler/deletion_not_allowed.html'
#: Whether translations of inlines should also be deleted when deleting a translation.
delete_inline_translations = True
@property
def change_form_template(self):
"""
Dynamic property to support transition to regular models.
This automatically picks ``admin/parler/change_form.html`` when the admin uses a translatable model.
"""
if self._has_translatable_model():
# While this breaks the admin template name detection,
# the get_change_form_base_template() makes sure it inherits from your template.
return 'admin/parler/change_form.html'
else:
return None # get default admin selection
def language_column(self, object):
"""
The language column which can be included in the ``list_display``.
"""
return self._languages_column(object, span_classes='available-languages') # span class for backwards compatibility
language_column.allow_tags = True
language_column.short_description = _("Languages")
def all_languages_column(self, object):
"""
The language column which can be included in the ``list_display``.
It also shows untranslated languages
"""
all_languages = [code for code,__ in settings.LANGUAGES]
return self._languages_column(object, all_languages, span_classes='all-languages')
all_languages_column.allow_tags = True
all_languages_column.short_description = _("Languages")
def _languages_column(self, object, all_languages=None, span_classes=''):
active_languages = self.get_available_languages(object)
if all_languages is None:
all_languages = active_languages
current_language = object.get_current_language()
buttons = []
opts = self.opts
for code in (all_languages or active_languages):
classes = ['lang-code']
if code in active_languages:
classes.append('active')
else:
classes.append('untranslated')
if code == current_language:
classes.append('current')
info = _get_model_meta(opts)
admin_url = reverse('admin:{0}_{1}_change'.format(*info), args=(object.pk,), current_app=self.admin_site.name)
buttons.append('<a class="{classes}" href="{href}?language={language_code}">{title}</a>'.format(
language_code=code,
classes=' '.join(classes),
href=escape(admin_url),
title=conditional_escape(self.get_language_short_title(code))
))
return '<span class="language-buttons {0}">{1}</span>'.format(
span_classes,
' '.join(buttons)
)
def get_language_short_title(self, language_code):
"""
Hook for allowing to change the title in the :func:`language_column` of the list_display.
"""
# Show language codes in uppercase by default.
# This avoids a general text-transform CSS rule,
# that might conflict with showing longer titles for a language instead of the code.
# (e.g. show "Global" instead of "EN")
return language_code.upper()
def get_available_languages(self, obj):
"""
Fetching the available languages as queryset.
"""
if obj:
return obj.get_available_languages()
else:
return self.model._parler_meta.root_model.objects.none()
def get_object(self, request, object_id, *args, **kwargs):
"""
Make sure the object is fetched in the correct language.
"""
# The args/kwargs are to support Django 1.8, which adds a from_field parameter
obj = super(TranslatableAdmin, self).get_object(request, object_id, *args, **kwargs)
if obj is not None and self._has_translatable_model(): # Allow fallback to regular models.
obj.set_current_language(self._language(request, obj), initialize=True)
return obj
def get_form(self, request, obj=None, **kwargs):
"""
Pass the current language to the form.
"""
form_class = super(TranslatableAdmin, self).get_form(request, obj, **kwargs)
if self._has_translatable_model():
form_class.language_code = self.get_form_language(request, obj)
return form_class
def get_urls(self):
"""
Add a delete-translation view.
"""
urlpatterns = super(TranslatableAdmin, self).get_urls()
if not self._has_translatable_model():
return urlpatterns
else:
opts = self.model._meta
info = _get_model_meta(opts)
return patterns('',
url(r'^(.+)/delete-translation/(.+)/$',
self.admin_site.admin_view(self.delete_translation),
name='{0}_{1}_delete_translation'.format(*info)
),
) + urlpatterns
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
"""
Insert the language tabs.
"""
if self._has_translatable_model():
lang_code = self.get_form_language(request, obj)
lang = get_language_title(lang_code)
available_languages = self.get_available_languages(obj)
language_tabs = self.get_language_tabs(request, obj, available_languages)
context['language_tabs'] = language_tabs
if language_tabs:
context['title'] = '%s (%s)' % (context['title'], lang)
if not language_tabs.current_is_translated:
add = True # lets prepopulated_fields_js work.
# Patch form_url to contain the "language" GET parameter.
# Otherwise AdminModel.render_change_form will clean the URL
# and remove the "language" when coming from a filtered object
# list causing the wrong translation to be changed.
form_url = add_preserved_filters({'preserved_filters': urlencode({'language': lang_code}), 'opts': self.model._meta}, form_url)
# django-fluent-pages uses the same technique
if 'default_change_form_template' not in context:
context['default_change_form_template'] = self.get_change_form_base_template()
#context['base_template'] = self.get_change_form_base_template()
return super(TranslatableAdmin, self).render_change_form(request, context, add, change, form_url, obj)
def response_add(self, request, obj, post_url_continue=None):
# Minor behavior difference for Django 1.4
if post_url_continue is None and django.VERSION < (1,5):
post_url_continue = '../%s/'
# Make sure ?language=... is included in the redirects.
redirect = super(TranslatableAdmin, self).response_add(request, obj, post_url_continue)
return self._patch_redirect(request, obj, redirect)
def response_change(self, request, obj):
# Make sure ?language=... is included in the redirects.
redirect = super(TranslatableAdmin, self).response_change(request, obj)
return self._patch_redirect(request, obj, redirect)
def _patch_redirect(self, request, obj, redirect):
if redirect.status_code not in (301,302):
return redirect # a 200 response likely.
uri = iri_to_uri(request.path)
opts = self.model._meta
info = _get_model_meta(opts)
# Pass ?language=.. to next page.
language = request.GET.get(self.query_language_key)
if language:
continue_urls = (uri, "../add/", reverse('admin:{0}_{1}_add'.format(*info)))
if redirect['Location'] in continue_urls and self.query_language_key in request.GET:
# "Save and add another" / "Save and continue" URLs
redirect['Location'] += "?{0}={1}".format(self.query_language_key, language)
return redirect
@csrf_protect_m
@transaction_atomic
def delete_translation(self, request, object_id, language_code):
"""
The 'delete translation' admin view for this model.
"""
opts = self.model._meta
root_model = self.model._parler_meta.root_model
# Get object and translation
shared_obj = self.get_object(request, unquote(object_id))
if shared_obj is None:
raise Http404
shared_obj.set_current_language(language_code)
try:
translation = root_model.objects.get(master=shared_obj, language_code=language_code)
except root_model.DoesNotExist:
raise Http404
if not self.has_delete_permission(request, translation):
raise PermissionDenied
if len(self.get_available_languages(shared_obj)) <= 1:
return self.deletion_not_allowed(request, translation, language_code)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
using = router.db_for_write(root_model) # NOTE: all same DB for now.
lang = get_language_title(language_code)
# There are potentially multiple objects to delete;
# the translation object at the base level,
# and additional objects that can be added by inherited models.
deleted_objects = []
perms_needed = False
protected = []
# Extend deleted objects with the inlines.
for qs in self.get_translation_objects(request, translation.language_code, obj=shared_obj, inlines=self.delete_inline_translations):
if isinstance(qs, (list,tuple)):
qs_opts = qs[0]._meta
else:
qs_opts = qs.model._meta
deleted_result = get_deleted_objects(qs, qs_opts, request.user, self.admin_site, using)
if django.VERSION >= (1,8):
(del2, model_counts, perms2, protected2) = deleted_result
else:
(del2, perms2, protected2) = deleted_result
deleted_objects += del2
perms_needed = perms_needed or perms2
protected += protected2
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = _('{0} translation of {1}').format(lang, force_text(translation)) # in hvad: (translation.master)
self.log_deletion(request, translation, obj_display)
self.delete_model_translation(request, translation)
self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % dict(
name=force_text(opts.verbose_name), obj=force_text(obj_display)
))
if self.has_change_permission(request, None):
info = _get_model_meta(opts)
return HttpResponseRedirect(reverse('admin:{0}_{1}_change'.format(*info), args=(object_id,), current_app=self.admin_site.name))
else:
return HttpResponseRedirect(reverse('admin:index', current_app=self.admin_site.name))
object_name = _('{0} Translation').format(force_text(opts.verbose_name))
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"object_name": object_name,
"object": translation,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": opts.app_label,
}
return render(request, self.delete_confirmation_template or [
"admin/%s/%s/delete_confirmation.html" % (opts.app_label, opts.object_name.lower()),
"admin/%s/delete_confirmation.html" % opts.app_label,
"admin/delete_confirmation.html"
], context)
def deletion_not_allowed(self, request, obj, language_code):
"""
Deletion-not-allowed view.
"""
opts = self.model._meta
context = {
'object': obj.master,
'language_code': language_code,
'opts': opts,
'app_label': opts.app_label,
'language_name': get_language_title(language_code),
'object_name': force_text(opts.verbose_name)
}
return render(request, self.deletion_not_allowed_template, context)
def delete_model_translation(self, request, translation):
"""
Hook for deleting a translation.
This calls :func:`get_translation_objects` to collect all related objects for the translation.
By default, that includes the translations for inline objects.
"""
master = translation.master
for qs in self.get_translation_objects(request, translation.language_code, obj=master, inlines=self.delete_inline_translations):
if isinstance(qs, (tuple,list)):
# The objects are deleted one by one.
# This triggers the post_delete signals and such.
for obj in qs:
obj.delete()
else:
# Also delete translations of inlines which the user has access to.
# This doesn't trigger signals, just like the regular
qs.delete()
def get_translation_objects(self, request, language_code, obj=None, inlines=True):
"""
Return all objects that should be deleted when a translation is deleted.
This method can yield all QuerySet objects or lists for the objects.
"""
if obj is not None:
# A single model can hold multiple TranslatedFieldsModel objects.
# Return them all.
for translations_model in obj._parler_meta.get_all_models():
try:
translation = translations_model.objects.get(master=obj, language_code=language_code)
except translations_model.DoesNotExist:
continue
yield [translation]
if inlines:
for inline, qs in self._get_inline_translations(request, language_code, obj=obj):
yield qs
def _get_inline_translations(self, request, language_code, obj=None):
"""
Fetch the inline translations
"""
# django 1.4 do not accept the obj parameter
if django.VERSION < (1, 5):
inline_instances = self.get_inline_instances(request)
else:
inline_instances = self.get_inline_instances(request, obj=obj)
for inline in inline_instances:
if issubclass(inline.model, TranslatableModel):
# leverage inlineformset_factory() to find the ForeignKey.
# This also resolves the fk_name if it's set.
fk = inline.get_formset(request, obj).fk
rel_name = 'master__{0}'.format(fk.name)
filters = {
'language_code': language_code,
rel_name: obj
}
for translations_model in inline.model._parler_meta.get_all_models():
qs = translations_model.objects.filter(**filters)
if obj is not None:
qs = qs.using(obj._state.db)
yield inline, qs
def get_change_form_base_template(self):
    """
    Resolve the base template that ``change_form_template`` should extend.

    The most specific template (per model, then per app) wins; the stock
    admin template is the final fallback.
    """
    meta = self.model._meta
    candidates = (
        "admin/{0}/{1}/change_form.html".format(meta.app_label, meta.object_name.lower()),
        "admin/{0}/change_form.html".format(meta.app_label),
        "admin/change_form.html",
    )
    return _lazy_select_template_name(candidates)
_lazy_select_template_name = lazy(select_template_name, six.text_type)
class TranslatableInlineModelAdmin(BaseTranslatableAdmin, InlineModelAdmin):
    """
    Base class for inline models.

    Adds language handling to inlines: the formset receives the current
    language code, and per-inline language tabs are rendered when the
    parent model itself is not translatable.
    """
    #: The form to use.
    form = TranslatableModelForm

    #: The formset to use.
    formset = TranslatableBaseInlineFormSet

    @property
    def inline_tabs(self):
        """
        Whether to show inline tabs, can be set as attribute on the inline.
        """
        # Tabs are only rendered per-inline when the parent has no tabs of
        # its own (i.e. the parent model is not translatable).
        return not self._has_translatable_parent_model()

    def _has_translatable_parent_model(self):
        # Allow fallback to regular models when needed.
        return issubclass(self.parent_model, TranslatableModel)

    def get_queryset_language(self, request):
        """Return the language used for fetching the inline objects."""
        if not is_multilingual_project():
            # Make sure the current translations remain visible, not the dynamically set get_language() value.
            return appsettings.PARLER_LANGUAGES.get_default_language()
        else:
            # Set the initial language for fetched objects.
            # This is needed for the TranslatableInlineModelAdmin
            return self._language(request)

    def get_formset(self, request, obj=None, **kwargs):
        """
        Return the formset, and provide the language information to the formset.
        """
        FormSet = super(TranslatableInlineModelAdmin, self).get_formset(request, obj, **kwargs)
        # Existing objects already got the language code from the queryset().language() method.
        # For new objects, the language code should be set here.
        FormSet.language_code = self.get_form_language(request, obj)

        if self.inline_tabs:
            # Need to pass information to the template, this can only happen via the FormSet object.
            available_languages = self.get_available_languages(obj, FormSet)
            FormSet.language_tabs = self.get_language_tabs(request, obj, available_languages, css_class='parler-inline-language-tabs')
            FormSet.language_tabs.allow_deletion = self._has_translatable_parent_model()  # Views not available otherwise.
        return FormSet

    def get_form_language(self, request, obj=None):
        """
        Return the current language for the currently displayed object fields.
        """
        if self._has_translatable_parent_model():
            return super(TranslatableInlineModelAdmin, self).get_form_language(request, obj=obj)
        else:
            # Follow the ?language parameter
            return self._language(request)

    def get_available_languages(self, obj, formset):
        """
        Fetching the available inline languages as queryset.
        """
        if obj:
            # Inlines dictate language code, not the parent model.
            # Hence, not looking at obj.get_available_languages(), but see what languages
            # are used by the inline objects that point to it.
            filter = {
                'master__{0}'.format(formset.fk.name): obj
            }
            return self.model._parler_meta.root_model.objects.using(obj._state.db).filter(**filter) \
                .values_list('language_code', flat=True).distinct().order_by('language_code')
        else:
            # Unsaved parent: no inline objects can exist yet.
            return self.model._parler_meta.root_model.objects.none()
class TranslatableStackedInline(TranslatableInlineModelAdmin):
    """
    Inline admin class using the stacked layout.
    """
    @property
    def template(self):
        # Use the tab-aware variant only when language tabs are rendered
        # inline; otherwise fall back to the stock admin template.
        if not self.inline_tabs:
            return 'admin/edit_inline/stacked.html'
        return 'admin/parler/edit_inline/stacked_tabs.html'
class TranslatableTabularInline(TranslatableInlineModelAdmin):
    """
    Inline admin class using the tabular layout.
    """
    @property
    def template(self):
        # Use the tab-aware variant only when language tabs are rendered
        # inline; otherwise fall back to the stock admin template.
        if not self.inline_tabs:
            return 'admin/edit_inline/tabular.html'
        return 'admin/parler/edit_inline/tabular_tabs.html'
class SortedRelatedFieldListFilter(admin.RelatedFieldListFilter):
    """
    Variant of :class:`~django.contrib.admin.RelatedFieldListFilter` that
    sorts the filter choices by their rendered ``__unicode__()`` text.

    This helps translated models, whose values are difficult to sort in the
    database beforehand.

    Usage:

    .. code-block:: python

        from django.contrib import admin
        from parler.admin import SortedRelatedFieldListFilter

        class MyAdmin(admin.ModelAdmin):
            list_filter = (
                ('related_field_name', SortedRelatedFieldListFilter),
            )
    """
    def __init__(self, *args, **kwargs):
        super(SortedRelatedFieldListFilter, self).__init__(*args, **kwargs)

        def _display_text(choice):
            # choice is a (pk, label) pair; sort case-insensitively on label.
            return choice[1].lower()

        self.lookup_choices = sorted(self.lookup_choices, key=_display_text)
# Django 1.7 renamed Options.module_name to Options.model_name.
if django.VERSION >= (1, 7):
    _MODEL_NAME_ATTR = 'model_name'
else:
    _MODEL_NAME_ATTR = 'module_name'


def _get_model_meta(opts):
    """Return ``(app_label, model_name)`` for the given model ``Meta`` options."""
    return opts.app_label, getattr(opts, _MODEL_NAME_ATTR)
|
{
"content_hash": "5ee914dbac645de5152e9cb4ed400507",
"timestamp": "",
"source": "github",
"line_count": 715,
"max_line_length": 143,
"avg_line_length": 38.95384615384615,
"alnum_prop": 0.632342381157547,
"repo_name": "ellmetha/django-parler",
"id": "6f42c78d64d2ab196524df5ef3f8f624cdbba891",
"size": "27852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parler/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5290"
},
{
"name": "HTML",
"bytes": "6000"
},
{
"name": "Python",
"bytes": "224232"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
from os.path import abspath, basename, join, isdir, isfile, islink

from egginst.utils import rm_rf

# Set to True to print progress information while creating/fixing scripts.
verbose = False

# Python interpreter written into the #! line of generated scripts.
executable = sys.executable

# Matches the #! (hashbang) line of an existing script, so it can be rewritten.
hashbang_pat = re.compile(r'#!.+$', re.M)
def write_exe(dst):
from exe_data import cli as data
rm_rf(dst)
try:
open(dst, 'wb').write(data)
except IOError:
# When bootstrapping, the file egginst.exe is in use and can therefore
# not be rewritten, which is OK since its content is always the same.
pass
os.chmod(dst, 0755)
def create_proxy(src, bin_dir):
    """
    create a proxy of src in bin_dir (Windows only)

    Returns the tuple ``(exe_path, script_path)`` of the two files written:
    a launcher executable and the ``*-script.py`` it runs.
    """
    if verbose:
        print "Creating proxy executable to: %r" % src
    assert src.endswith('.exe')

    # Strip the distribution prefix, e.g. epd-foo.exe is exposed as foo.exe.
    dst_name = basename(src)
    if dst_name.startswith('epd-'):
        dst_name = dst_name[4:]

    dst = join(bin_dir, dst_name)
    write_exe(dst)

    # The launcher runs the sibling *-script.py, which simply forwards all
    # command-line arguments to the real target executable.
    dst_script = dst[:-4] + '-script.py'
    rm_rf(dst_script)
    fo = open(dst_script, 'w')
    fo.write('''\
#!"%(python)s"
# This proxy was created by egginst from an egg with special instructions
#
import sys
import subprocess
src = %(src)r
sys.exit(subprocess.call([src] + sys.argv[1:]))
''' % dict(python=executable, src=src))
    fo.close()
    return dst, dst_script
def create_proxies(egg):
    """
    Install the entries listed in ``EGG-INFO/inst/files_to_install.txt``:
    PROXY entries get a launcher via create_proxy(); all other entries are
    extracted verbatim into the directory named by the action column.
    """
    # This function is called on Windows only
    if not isdir(egg.bin_dir):
        os.makedirs(egg.bin_dir)

    for line in egg.lines_from_arcname('EGG-INFO/inst/files_to_install.txt'):
        arcname, action = line.split()
        if verbose:
            print "arcname=%r action=%r" % (arcname, action)

        if action == 'PROXY':
            # The proxy target lives either inside the egg's meta dir or
            # under the install prefix, depending on the archive path.
            ei = 'EGG-INFO/'
            if arcname.startswith(ei):
                src = abspath(join(egg.meta_dir, arcname[len(ei):]))
            else:
                src = abspath(join(egg.prefix, arcname))
            if verbose:
                print " src: %r" % src
            # create_proxy() returns the two files written; track both.
            egg.files.extend(create_proxy(src, egg.bin_dir))
        else:
            # Any other action is a destination directory (relative to the
            # prefix) into which the archive member is copied.
            data = egg.z.read(arcname)
            dst = abspath(join(egg.prefix, action, basename(arcname)))
            if verbose:
                print " dst: %r" % dst
            rm_rf(dst)
            fo = open(dst, 'wb')
            fo.write(data)
            fo.close()
            egg.files.append(dst)
def write_script(path, entry_pt, egg_name):
    """
    Write an entry point script to path.

    ``entry_pt`` must look like ``"module.path:function"``; the generated
    script imports the function and exits with its return value.
    """
    if verbose:
        print 'Creating script: %s' % path

    assert entry_pt.count(':') == 1
    module, func = entry_pt.strip().split(':')
    # Quote the interpreter path in case it contains spaces.
    python = '"%s"' % executable

    rm_rf(path)
    fo = open(path, 'w')
    # NOTE: the ' egginst ' marker in the comment below is what fix_script()
    # uses to recognize scripts that need no hashbang rewriting.
    fo.write('''\
#!%(python)s
# This script was created by egginst when installing:
#
# %(egg_name)s
#
if __name__ == '__main__':
    import sys
    from %(module)s import %(func)s
    sys.exit(%(func)s())
''' % locals())
    fo.close()
    os.chmod(path, 0755)
def create(egg, conf):
    """
    Create one launcher .exe plus a *-script.py for every console_scripts
    entry point declared in ``conf``, recording all written files on ``egg``.
    """
    if not isdir(egg.bin_dir):
        os.makedirs(egg.bin_dir)

    if 'console_scripts' not in conf.sections():
        return

    for name, entry_pt in conf.items('console_scripts'):
        exe_path = join(egg.bin_dir, name + '.exe')
        write_exe(exe_path)
        egg.files.append(exe_path)

        script_path = join(egg.bin_dir, name + '-script.py')
        write_script(script_path, entry_pt, basename(egg.fpath))
        egg.files.append(script_path)
def fix_script(path):
    """
    Fixes a single script located at path, rewriting its hashbang line to
    point at the current Python interpreter.  Symlinks, non-files, scripts
    created by egginst itself, and non-Python hashbangs are left alone.
    """
    if islink(path) or not isfile(path):
        return

    fi = open(path)
    data = fi.read()
    fi.close()

    if ' egginst ' in data:
        # This string is in the comment when write_script() creates
        # the script, so there is no need to fix anything.
        return

    m = hashbang_pat.match(data)
    if not (m and 'python' in m.group().lower()):
        return

    # Quote the interpreter path; double the backslashes because the
    # replacement string of re.sub() interprets them as escapes.
    python = '"%s"' % executable
    new_data = hashbang_pat.sub('#!' + python.replace('\\', '\\\\'),
                                data, count=1)
    if new_data == data:
        return

    if verbose:
        print "Updating: %r" % path
    fo = open(path, 'w')
    fo.write(new_data)
    fo.close()
    os.chmod(path, 0755)
def fix_scripts(egg):
    """Rewrite the hashbang line of every installed script in the bin dir."""
    bin_scripts = (p for p in egg.files if p.startswith(egg.bin_dir))
    for script_path in bin_scripts:
        fix_script(script_path)
if __name__ == '__main__':
    # Allow regenerating the launcher manually: python scripts.py
    write_exe('cli.exe')
|
{
"content_hash": "6166f07eda654ee2ff8ec201d9bb5029",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 78,
"avg_line_length": 24.39779005524862,
"alnum_prop": 0.5620471014492754,
"repo_name": "ilanschnell/ironpkg",
"id": "7efe7d11d9b808419f2b2a01b5b3fe3f2a47ca58",
"size": "4416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "egginst/scripts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "133028"
},
{
"name": "Shell",
"bytes": "501"
}
],
"symlink_target": ""
}
|
import struct
from .connection import Connection, PacketCodec
class AbridgedPacketCodec(PacketCodec):
    """
    Codec for the "abridged" TCP transport: each packet is prefixed with
    its length expressed in 32-bit words, using a single byte when it fits.
    """
    tag = b'\xef'
    obfuscate_tag = b'\xef\xef\xef\xef'

    def encode_packet(self, data):
        # Lengths are counted in 4-byte words, not bytes.
        word_count = len(data) >> 2
        if word_count < 127:
            header = struct.pack('B', word_count)
        else:
            # 0x7f marks an extended, 3-byte little-endian length.
            header = b'\x7f' + word_count.to_bytes(3, 'little')
        return header + data

    async def read_packet(self, reader):
        first_byte = await reader.readexactly(1)
        word_count = struct.unpack('<B', first_byte)[0]
        if word_count >= 127:
            # Extended length: the next 3 bytes hold the real word count.
            word_count = struct.unpack(
                '<i', await reader.readexactly(3) + b'\0')[0]
        return await reader.readexactly(word_count << 2)
class ConnectionTcpAbridged(Connection):
    """
    This is the mode with the lowest overhead, as it will
    only require 1 byte if the packet length is less than
    508 bytes (127 << 2, which is very common).
    """
    # All framing is delegated to the abridged codec defined above.
    packet_codec = AbridgedPacketCodec
|
{
"content_hash": "861268dbbe75f5c95d7bbd51ca9bf908",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 29.12121212121212,
"alnum_prop": 0.6097814776274714,
"repo_name": "expectocode/Telethon",
"id": "171b1d8c89a11f1e5a863d0ac4b87324b7676f0b",
"size": "961",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "telethon/network/connection/tcpabridged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "776"
},
{
"name": "Makefile",
"bytes": "605"
},
{
"name": "Python",
"bytes": "443578"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
"""
#TODO: Write module doc.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import warnings
warnings.warn("pymatgen.io.vaspio_set has been moved pymatgen.io.vasp.sets. "
"This stub will be removed in pymatgen 4.0.")
from .vasp.sets import *
|
{
"content_hash": "b919c2e864403cd2caca83cde8a511a6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 77,
"avg_line_length": 25.5,
"alnum_prop": 0.6557734204793029,
"repo_name": "sonium0/pymatgen",
"id": "2c76e7790201474f4b99cb022d2d518d6d90354f",
"size": "591",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/io/vaspio_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Groff",
"bytes": "868"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3590333"
}
],
"symlink_target": ""
}
|
"""
Very basic estimator copied (nearly) wholesale from
https://www.kaggle.com/zygmunt/rossmann-store-sales/predict-sales-with-pandas-py.
This is mostly just for testing purposes, as pretty much anything we put together will exhibit
better performance than this.
Author: BWP
"""
import pandas as pd
from sklearn.base import BaseEstimator
train_file = '../data/enriched/train.csv'
test_file = '../data/enriched/test.csv'
output_file = '../data/output/predictions.csv'
class MedianEstimator(BaseEstimator):
    """Predict sales as the median of historical sales, conditioned on a
    small set of categorical columns.

    A coarser backup table (grouped by ``cols_backup``) fills holes where a
    combination seen in the test set never occurred in the training set.
    """

    def __init__(self,
                 cols=['Store', 'DayOfWeek', 'Open', 'Promo', 'StateHoliday'],
                 cols_backup=['Store', 'DayOfWeek', 'Open']):
        # NOTE: the mutable list defaults are kept as-is for sklearn
        # get_params()/clone() compatibility; they are never mutated.
        self.cols = cols
        self.cols_backup = cols_backup
        self.medians = None
        self.medians_backup = None

    def fit(self, train_features, train_values):
        """Learn median sales grouped by ``cols``, plus a backup table
        grouped by the subset ``cols_backup`` in case there are holes in the
        training set when applied to the test set.

        Returns ``self`` (sklearn convention).
        """
        # this fast version with pre-computed medians and merge thanks to dune_dweller
        # https://www.kaggle.com/dvasyukova/rossmann-store-sales/predict-sales-with-pandas-py/code
        #
        # Work on a copy so the caller's DataFrame is not mutated (the
        # previous implementation added a 'Sales' column in place).
        train = train_features.copy()
        train.loc[:, 'Sales'] = train_values
        medians = train.groupby(self.cols)['Sales'].median()
        self.medians = medians.reset_index()
        medians_backup = train.groupby(self.cols_backup)['Sales'].median()
        self.medians_backup = medians_backup.reset_index()
        assert(self.medians_backup.Sales.isnull().sum() == 0)
        return self

    def predict(self, test):
        """Predict sales for each row of ``test``, assuming the model has
        already been fitted.

        Unseen combinations fall back to the coarser backup medians, and
        closed stores (``Open == 0``) are forced to 0 sales.
        """
        assert(len(self.medians.columns) == len(self.cols) + 1)
        assert('Sales' in self.medians.columns)
        assert(len(self.medians_backup.columns) == len(self.cols_backup) + 1)
        assert('Sales' in self.medians_backup.columns)
        assert(self.medians_backup.Sales.isnull().sum() == 0)
        assert(all([c in test for c in self.cols]))

        test2 = pd.merge(test,
                         self.medians,
                         on=self.cols,
                         how='left')
        test2_backup = pd.merge(test.copy(),
                                self.medians_backup,
                                on=self.cols_backup,
                                how='left')
        assert(len(test2) == len(test2_backup))
        assert('Sales' in test2.columns)
        assert(test2_backup.Sales.isnull().sum() == 0)

        # apply backup
        test2.loc[test2.Sales.isnull(), 'Sales'] = test2_backup.loc[test2.Sales.isnull(), 'Sales']

        # shop closed -> sales = 0
        test2.loc[test2.Open == 0, 'Sales'] = 0
        assert(test2.Sales.isnull().sum() == 0)

        # The test set carries an 'Id' column; intermediate evaluation
        # frames may not, hence the fallback to the bare Series.
        try:
            return test2[['Id', 'Sales']]
        except KeyError:
            return test2['Sales']
|
{
"content_hash": "8c11af6f113254c0eb3570d68fb2d18e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 102,
"avg_line_length": 38.98765432098765,
"alnum_prop": 0.6010132995566815,
"repo_name": "bwpriest/rossmannsales",
"id": "8f49f165385340c03c17ad12a958627b31bee26f",
"size": "3158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/baseline_simple_median.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25589"
}
],
"symlink_target": ""
}
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test use of ${TARGET.dir} to specify a CPPPATH directory in
combination VariantDirs and a generated .h file.
"""
import TestSCons
_exe = TestSCons._exe
test = TestSCons.TestSCons()
build1_foo = test.workpath('build1', 'foo' + _exe)
build2_foo = test.workpath('build2', 'foo' + _exe)
test.subdir('src', 'build1', 'build2')
test.write('SConstruct', """
def cat(env, source, target):
target = str(target[0])
f = open(target, "wb")
for src in source:
f.write(open(str(src), "rb").read())
f.close()
env = Environment(CPPPATH='${TARGET.dir}')
env.Append(BUILDERS = {'Cat' : Builder(action=cat)})
Export('env')
VariantDir('build1', 'src')
SConscript('build1/SConscript')
VariantDir('build2', 'src')
SConscript('build2/SConscript', duplicate=0)
""")
test.write(['src', 'SConscript'], """
Import('env')
env.Cat('foo.h', 'foo.h.in')
env.Program('foo', ['foo.c'])
""")
test.write(['src', 'foo.h.in'], """\
#define STRING "foo.h.in\\n"
""")
test.write(['src', 'foo.c'], """\
#include <stdio.h>
#include <stdlib.h>
#include <foo.h>
int
main(int argc, char *argv[])
{
printf(STRING);
printf("foo.c\\n");
exit (0);
}
""")
test.run(arguments = '.')
test.run(program = build1_foo, stdout = "foo.h.in\nfoo.c\n")
test.run(program = build2_foo, stdout = "foo.h.in\nfoo.c\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "9efca0e2957323c7c59828f59cfde0cd",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 61,
"avg_line_length": 21.112676056338028,
"alnum_prop": 0.6270847231487658,
"repo_name": "timj/scons",
"id": "9e99087896e64800237ec94463883787bc8705e0",
"size": "2601",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/TARGET-dir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "593"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7393581"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52480"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
}
|
import datetime
import json
import logging
from typing import Any, Dict, Iterable, Tuple

import pendulum
from dateutil import relativedelta
from sqlalchemy import TIMESTAMP, PickleType, and_, event, false, nullsfirst, or_, tuple_
from sqlalchemy.dialects import mssql, mysql
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import Session
from sqlalchemy.sql import ColumnElement
from sqlalchemy.sql.expression import ColumnOperators
from sqlalchemy.types import JSON, Text, TypeDecorator, TypeEngine, UnicodeText

from airflow import settings
from airflow.configuration import conf
from airflow.serialization.enums import Encoding

log = logging.getLogger(__name__)

# All datetimes are normalized to UTC before hitting (or after leaving) the DB.
utc = pendulum.tz.timezone('UTC')

# MySQL needs special-casing in several places below (naive timestamps,
# row-locking feature detection).
using_mysql = conf.get_mandatory_value('database', 'sql_alchemy_conn').lower().startswith('mysql')
class UtcDateTime(TypeDecorator):
    """
    Timestamp column type that only accepts and returns UTC-aware datetimes.

    Compared to :class:`~sqlalchemy.types.TIMESTAMP` with ``timezone=True``:

    - naive :class:`~datetime.datetime` values raise an error instead of
      being silently accepted;
    - incoming values are normalized to UTC;
    - values read back are always timezone-aware UTC, even on SQLite/MySQL.
    """

    impl = TIMESTAMP(timezone=True)

    cache_ok = True

    def process_bind_param(self, value, dialect):
        if value is None:
            return None
        if not isinstance(value, datetime.datetime):
            raise TypeError('expected datetime.datetime, not ' + repr(value))
        if value.tzinfo is None:
            raise ValueError('naive datetime is disallowed')
        # MySQL TIMESTAMP columns are not timezone aware: MySQL 5.6 ignores a
        # trailing timezone, and 5.7 rejects the value as 'invalid-date', so
        # store a naive UTC datetime there instead.
        # See https://issues.apache.org/jira/browse/AIRFLOW-7001
        if using_mysql:
            from airflow.utils.timezone import make_naive

            return make_naive(value, timezone=utc)
        return value.astimezone(utc)

    def process_result_value(self, value, dialect):
        """
        Normalize datetimes coming from the DB to timezone-aware UTC.

        We deliberately avoid timezone.convert_to_utc, which applies the
        configured TIMEZONE rather than the DB's actual setting; UTC
        datetimes are assumed in the database.
        """
        if value is None:
            return None
        if value.tzinfo is None:
            return value.replace(tzinfo=utc)
        return value.astimezone(utc)

    def load_dialect_impl(self, dialect):
        # MSSQL and MySQL need explicit microsecond precision.
        if dialect.name == 'mssql':
            return mssql.DATETIME2(precision=6)
        if dialect.name == 'mysql':
            return mysql.TIMESTAMP(fsp=6)
        return super().load_dialect_impl(dialect)
class ExtendedJSON(TypeDecorator):
    """
    A version of the JSON column that uses the Airflow extended JSON
    serialization provided by airflow.serialization, with a text fallback
    for backends without native JSON support (MSSQL).
    """

    impl = Text

    cache_ok = True

    def db_supports_json(self):
        """Checks if the database supports JSON (i.e. is NOT MSSQL)"""
        return not conf.get("database", "sql_alchemy_conn").startswith("mssql")

    def load_dialect_impl(self, dialect) -> "TypeEngine":
        if not self.db_supports_json():
            return dialect.type_descriptor(UnicodeText)
        return dialect.type_descriptor(JSON)

    def process_bind_param(self, value, dialect):
        from airflow.serialization.serialized_objects import BaseSerialization

        if value is None:
            return None

        # Encode into Airflow's JSON-targeted dict format first...
        encoded = BaseSerialization._serialize(value)

        # ...then stringify for backends without native JSON support.
        if self.db_supports_json():
            return encoded
        return json.dumps(encoded)

    def process_result_value(self, value, dialect):
        from airflow.serialization.serialized_objects import BaseSerialization

        if value is None:
            return None

        # Undo the extra string layer used on non-JSON backends.
        decoded = value if self.db_supports_json() else json.loads(value)
        return BaseSerialization._deserialize(decoded)
class ExecutorConfigType(PickleType):
    """
    Adds special handling for K8s executor config. If we unpickle a k8s object that was
    pickled under an earlier k8s library version, then the unpickled object may throw an error
    when to_dict is called. To be more tolerant of version changes we convert to JSON using
    Airflow's serializer before pickling.
    """

    def bind_processor(self, dialect):
        from airflow.serialization.serialized_objects import BaseSerialization

        super_process = super().bind_processor(dialect)

        def process(value):
            # Serialize the pod_override (a k8s object) into Airflow's JSON
            # form before the base PickleType pickles the whole dict.  Note
            # this mutates the passed-in dict in place.
            if isinstance(value, dict) and 'pod_override' in value:
                value['pod_override'] = BaseSerialization()._serialize(value['pod_override'])
            return super_process(value)

        return process

    def result_processor(self, dialect, coltype):
        from airflow.serialization.serialized_objects import BaseSerialization

        super_process = super().result_processor(dialect, coltype)

        def process(value):
            value = super_process(value)  # unpickle
            if isinstance(value, dict) and 'pod_override' in value:
                pod_override = value['pod_override']

                # If pod_override was serialized with Airflow's BaseSerialization, deserialize it
                # (raw pickled k8s objects from older versions pass through unchanged).
                if isinstance(pod_override, dict) and pod_override.get(Encoding.TYPE):
                    value['pod_override'] = BaseSerialization()._deserialize(pod_override)
            return value

        return process

    def compare_values(self, x, y):
        """
        The TaskInstance.executor_config attribute is a pickled object that may contain
        kubernetes objects. If the installed library version has changed since the
        object was originally pickled, due to the underlying ``__eq__`` method on these
        objects (which converts them to JSON), we may encounter attribute errors. In this
        case we should replace the stored object.

        From https://github.com/apache/airflow/pull/24356 we use our serializer to store
        k8s objects, but there could still be raw pickled k8s objects in the database,
        stored from earlier version, so we still compare them defensively here.
        """
        if self.comparator:
            return self.comparator(x, y)
        else:
            try:
                return x == y
            except AttributeError:
                # Treat incomparable (version-skewed) objects as unequal so
                # the stored value gets replaced.
                return False
class Interval(TypeDecorator):
    """Base class representing a time interval.

    Stores ``datetime.timedelta`` and ``dateutil.relativedelta`` values as a
    JSON document ``{"type": ..., "attrs": {...}}``; any other JSON-encodable
    value is stored as plain JSON.
    """

    impl = Text

    cache_ok = True

    # Attribute names captured for each supported delta type; lookup in
    # process_bind_param is by the value's *exact* type.
    attr_keys = {
        datetime.timedelta: ('days', 'seconds', 'microseconds'),
        relativedelta.relativedelta: (
            'years',
            'months',
            'days',
            'leapdays',
            'hours',
            'minutes',
            'seconds',
            'microseconds',
            'year',
            'month',
            'day',
            'hour',
            'minute',
            'second',
            'microsecond',
        ),
    }

    def process_bind_param(self, value, dialect):
        # NOTE(review): isinstance() accepts subclasses of timedelta, but the
        # exact type(value) lookup below would then raise KeyError — confirm
        # whether subclass instances (e.g. pendulum.Duration) can reach here.
        if isinstance(value, tuple(self.attr_keys)):
            attrs = {key: getattr(value, key) for key in self.attr_keys[type(value)]}
            return json.dumps({'type': type(value).__name__, 'attrs': attrs})
        return json.dumps(value)

    def process_result_value(self, value, dialect):
        # Empty/NULL columns pass through unchanged.
        if not value:
            return value
        data = json.loads(value)
        if isinstance(data, dict):
            # Rebuild the original delta object from its recorded attributes.
            type_map = {key.__name__: key for key in self.attr_keys}
            return type_map[data['type']](**data['attrs'])
        return data
def skip_locked(session: Session) -> Dict[str, Any]:
    """
    Return kwargs for passing to `with_for_update()` suitable for the current DB engine version.

    We do this as we document the fact that on DB engines that don't support this construct, we do not
    support/recommend running HA scheduler. If a user ignores this and tries anyway everything will still
    work, just slightly slower in some circumstances.

    Specifically don't emit SKIP LOCKED for MySQL < 8, or MariaDB, neither of which support this construct

    See https://jira.mariadb.org/browse/MDEV-13115
    """
    dialect = session.bind.dialect
    # MariaDB and MySQL < 8 report no FOR UPDATE OF support; skip the option there.
    if dialect.name == "mysql" and not dialect.supports_for_update_of:
        return {}
    return {'skip_locked': True}
def nowait(session: Session) -> Dict[str, Any]:
    """
    Return kwargs for passing to `with_for_update()` suitable for the current DB engine version.

    We do this as we document the fact that on DB engines that don't support this construct, we do not
    support/recommend running HA scheduler. If a user ignores this and tries anyway everything will still
    work, just slightly slower in some circumstances.

    Specifically don't emit NOWAIT for MySQL < 8, or MariaDB, neither of which support this construct

    See https://jira.mariadb.org/browse/MDEV-13115
    """
    dialect = session.bind.dialect
    # MariaDB and MySQL < 8 report no FOR UPDATE OF support; skip NOWAIT there.
    if dialect.name == "mysql" and not dialect.supports_for_update_of:
        return {}
    return {'nowait': True}
def nulls_first(col, session: Session) -> ColumnElement:
    """
    Add a ``NULLS FIRST`` construct to the column ordering where supported.

    Currently only Postgres supports it; in MySQL & Sqlite NULL values are
    considered lower than any non-NULL value and therefore already appear
    first when the order is ASC (ascending), so the column is returned
    unchanged there.

    :param col: column element to order by
    :param session: ORM session, used to detect the database dialect
    :return: the (possibly wrapped) column ordering expression

    Note: the previous return annotation (``Dict[str, Any]``) was copied
    from the lock-kwargs helpers above and was wrong for this function.
    """
    if session.bind.dialect.name == "postgresql":
        return nullsfirst(col)
    else:
        return col
USE_ROW_LEVEL_LOCKING: bool = conf.getboolean('scheduler', 'use_row_level_locking', fallback=True)
def with_row_locks(query, session: Session, **kwargs):
    """
    Apply with_for_update to an SQLAlchemy query, if row level locking is in use.

    :param query: An SQLAlchemy Query object
    :param session: ORM Session
    :param kwargs: Extra kwargs to pass to with_for_update (of, nowait, skip_locked, etc)
    :return: updated query
    """
    if not USE_ROW_LEVEL_LOCKING:
        return query

    dialect = session.bind.dialect
    # Don't use row level locks if the MySQL dialect (Mariadb & MySQL < 8) does not support it.
    if dialect.name == "mysql" and not dialect.supports_for_update_of:
        return query

    return query.with_for_update(**kwargs)
class CommitProhibitorGuard:
    """Context manager class that powers prohibit_commit"""

    # Flipped to True (via ``commit()``) right before an intentional commit.
    expected_commit = False

    def __init__(self, session: Session):
        self.session = session

    def _validate_commit(self, _):
        # Fired by SQLAlchemy's before_commit event while the guard is active.
        if not self.expected_commit:
            raise RuntimeError("UNEXPECTED COMMIT - THIS WILL BREAK HA LOCKS!")
        # Consume the flag so the next commit must be re-authorized.
        self.expected_commit = False

    def __enter__(self):
        event.listen(self.session, 'before_commit', self._validate_commit)
        return self

    def __exit__(self, *exc_info):
        event.remove(self.session, 'before_commit', self._validate_commit)

    def commit(self):
        """
        Commit the session.

        This is the required way to commit when the guard is in scope
        """
        self.expected_commit = True
        self.session.commit()
def prohibit_commit(session):
    """
    Return a context manager that will disallow any commit that isn't done via the context manager.

    The aim of this is to ensure that transaction lifetime is strictly controlled which is especially
    important in the core scheduler loop. Any commit on the session that is _not_ via this context manager
    will result in RuntimeError

    Example usage:

    .. code:: python

        with prohibit_commit(session) as guard:
            # ... do something with session
            guard.commit()

            # This would throw an error
            # session.commit()

    :param session: the SQLAlchemy session to guard
    """
    return CommitProhibitorGuard(session)
def is_lock_not_available_error(error: OperationalError):
    """Check if the Error is about not being able to acquire lock"""
    # DB specific error codes:
    # Postgres: 55P03
    # MySQL: 3572, 'Statement aborted because lock(s) could not be acquired immediately and NOWAIT
    # is set.'
    # MySQL: 1205, 'Lock wait timeout exceeded; try restarting transaction
    # (when NOWAIT isn't available)
    #
    # We could test whether error.orig is psycopg2.errors.LockNotAvailable /
    # _mysql_exceptions.OperationalError, but that would require importing
    # the drivers here; matching the error code is enough.
    db_err_code = getattr(error.orig, 'pgcode', None) or error.orig.args[0]
    return db_err_code in ('55P03', 1205, 3572)
def tuple_in_condition(
    columns: Tuple[ColumnElement, ...],
    collection: Iterable[Any],
) -> ColumnOperators:
    """Generates a tuple-in-collection operator to use in ``.filter()``.

    For most SQL backends, this generates a simple ``([col, ...]) IN [condition]``
    clause. This however does not work with MSSQL, where we need to expand to
    ``(c1 = v1a AND c2 = v2a ...) OR (c1 = v1b AND c2 = v2b ...) ...`` manually.

    :meta private:
    """
    if settings.engine.dialect.name != "mssql":
        return tuple_(*columns).in_(collection)
    # MSSQL: expand into an OR of per-row AND clauses.  An empty collection
    # must match nothing, hence the literal FALSE.
    clauses = [and_(*(c == v for c, v in zip(columns, values))) for values in collection]
    if not clauses:
        return false()
    return or_(*clauses)
|
{
"content_hash": "90d8d4a80eda77ae105aec4180f0ca3b",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 106,
"avg_line_length": 34.99750623441396,
"alnum_prop": 0.6502778965369816,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "a6270ea0087bfd903cad35090d9b2845b6f6f46c",
"size": "14822",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/utils/sqlalchemy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
from . import FixtureTest
# test that features get assigned the correct collision rank.
#
# note that the collision rank system has been designed to make changing ranks
# and re-arranging / re-ordering very easy. in turn, this might make these
# tests very fragile because they check the exact number which is assigned as
# the rank.
#
# if updating these after every change becomes onerous, consider only testing
# important salient values (e.g: the first ones after an important reserved
# block) and switching the other tests to relative, as in CollisionOrderTest
#
class CollisionRankTest(FixtureTest):
def _check_rank(self, tags, zoom=16, source='openstreetmap.org',
                layer='pois', kind=None, rank=None, geom_type='point'):
    """
    Build a single feature from ``tags`` and assert that it appears in the
    given layer with the expected kind and collision_rank.
    """
    import dsl

    z, x, y = zoom, 0, 0

    feature_tags = dict(tags)
    feature_tags['source'] = source
    feature_tags.setdefault('name', 'Some name')

    # Map the requested geometry type onto the dsl shape builder.
    shape_builders = {
        'point': dsl.tile_centre_shape,
        'line': dsl.tile_diagonal,
        'polygon': dsl.tile_box,
    }
    assert geom_type in ('point', 'line', 'polygon')
    shape = shape_builders[geom_type](z, x, y)

    self.generate_fixtures(dsl.way(1, shape, feature_tags))
    self.assert_has_feature(
        z, x, y, layer, {
            'kind': kind,
            'collision_rank': rank,
        })
# Each test below pins the exact collision_rank assigned to one kind.  As the
# module comment explains, these exact numbers are fragile by design and will
# need updating whenever the collision-rank ordering changes.
def test_continent(self):
    self._check_rank(
        {'place': 'continent'},
        zoom=1, layer='earth',
        kind='continent', rank=300)

def test_transit_subway(self):
    self._check_rank(
        {'route': 'subway'},
        geom_type='line',
        layer='transit',
        kind='subway', rank=765)

def test_pois_swimming_area(self):
    self._check_rank(
        {'leisure': 'swimming_area'},
        layer='pois',
        kind='swimming_area', rank=3099)

def test_pois_battlefield(self):
    self._check_rank(
        {'historic': 'battlefield'},
        layer='pois',
        kind='battlefield', rank=546)

def test_pois_picnic_site(self):
    self._check_rank(
        {'tourism': 'picnic_site'},
        layer='pois', kind='picnic_site',
        rank=3097)

def test_water_ocean(self):
    self._check_rank(
        {'place': 'ocean'},
        layer='water', kind='ocean',
        rank=301)

def test_pois_water_park(self):
    self._check_rank(
        {'leisure': 'water_park'},
        layer='pois', kind='water_park',
        rank=1155)

def test_pois_fast_food(self):
    self._check_rank(
        {'amenity': 'fast_food'},
        layer='pois', kind='fast_food',
        rank=1156)

def test_pois_chemist(self):
    self._check_rank(
        {'shop': 'chemist'},
        layer='pois', kind='chemist',
        rank=1257)

def test_pois_cafe(self):
    self._check_rank(
        {'amenity': 'cafe'},
        layer='pois', kind='cafe',
        rank=1894)

def test_pois_doityourself(self):
    self._check_rank(
        {'shop': 'doityourself'},
        layer='pois', kind='doityourself',
        rank=1071)

def test_pois_shelter(self):
    self._check_rank(
        {'amenity': 'shelter'},
        layer='pois', kind='shelter',
        rank=3123)

# Station is checked as a polygon, since railway stations are often mapped
# as building outlines rather than points.
def test_transit_station(self):
    self._check_rank(
        {'railway': 'station'},
        geom_type='polygon',
        layer='transit', kind='station',
        rank=3747)

def test_pois_aviary(self):
    self._check_rank(
        {'zoo': 'aviary'},
        layer='pois', kind='aviary',
        rank=3309)

def test_pois_travel_agent(self):
    self._check_rank(
        {'office': 'travel_agent'},
        layer='pois', kind='travel_agent',
        rank=3745)

def test_pois_aerodrome(self):
    self._check_rank(
        {'aeroway': 'aerodrome'},
        layer='pois', kind='aerodrome',
        rank=493)

def test_pois_caravan_site(self):
    self._check_rank(
        {'tourism': 'caravan_site'},
        layer='pois', kind='caravan_site',
        rank=1354)

def test_water_riverbank(self):
    self._check_rank(
        {'waterway': 'riverbank'},
        geom_type='line',
        layer='water', kind='riverbank',
        rank=2372)

def test_pois_wood(self):
    self._check_rank(
        {'landuse': 'wood'},
        geom_type='polygon',
        layer='pois', kind='wood',
        rank=500)

def test_landuse_industrial(self):
    self._check_rank(
        {'landuse': 'industrial'},
        geom_type='polygon',
        layer='landuse', kind='industrial',
        rank=2849)
def test_pois_tobacco(self):
self._check_rank(
{'shop': 'tobacco'},
layer='pois', kind='tobacco',
rank=3743)
def test_pois_healthcare_centre(self):
self._check_rank(
{'healthcare': 'centre'},
layer='pois', kind='healthcare_centre',
rank=3445)
def test_pois_generator(self):
self._check_rank(
{'power': 'generator'},
layer='pois', kind='generator',
rank=2703)
def test_pois_post_box(self):
self._check_rank(
{'amenity': 'post_box'},
layer='pois', kind='post_box',
rank=4318)
def test_landuse_grass(self):
self._check_rank(
{'landuse': 'grass'},
geom_type='polygon',
layer='landuse', kind='grass',
rank=2901)
def test_non_maritime_boundary(self):
from tilequeue.tile import coord_to_bounds
from shapely.geometry import LineString
from ModestMaps.Core import Coordinate
import dsl
z, x, y = (8, 44, 88)
left_props = {
'source': 'openstreetmap.org',
'boundary': 'administrative',
'admin_level': '2',
'name': 'Country 1',
'mz_boundary_from_polygon': True, # need this for hack
}
right_props = {
'source': 'openstreetmap.org',
'boundary': 'administrative',
'admin_level': '2',
'name': 'Country 2',
'mz_boundary_from_polygon': True, # need this for hack
}
minx, miny, maxx, maxy = coord_to_bounds(
Coordinate(zoom=z, column=x, row=y))
# move the coordinate points slightly out of the tile, so that we
# don't get borders along the sides of the tile.
w = maxx - minx
h = maxy - miny
minx -= 0.5 * w
miny -= 0.5 * h
maxx += 0.5 * w
maxy += 0.5 * h
self.generate_fixtures(
dsl.way(1, dsl.tile_box(z, x, y), {
'source': 'tilezen.org',
'maritime_boundary': True,
'min_zoom': 0,
'kind': 'maritime',
}),
dsl.way(
1,
LineString([
[minx, miny],
[minx, maxy],
[maxx, maxy],
[minx, miny],
]),
left_props,
),
dsl.way(
2,
LineString([
[minx, miny],
[maxx, maxy],
[maxx, miny],
[minx, miny],
]),
right_props,
),
)
self.assert_has_feature(
z, x, y, 'boundaries', {
'kind': 'country',
'maritime_boundary': type(None),
'collision_rank': 807,
})
def test_maritime_boundary(self):
import dsl
z, x, y = (8, 44, 88)
self.generate_fixtures(
dsl.way(2, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org',
'name': 'Country 1',
'boundary': 'administrative',
'admin_level': '2',
'mz_boundary_from_polygon': True, # need this for hack
}),
)
self.assert_has_feature(
z, x, y, 'boundaries', {
'kind': 'country',
'maritime_boundary': True,
'collision_rank': 2375,
})
# helper class to make it easier to write CollisionOrderTest.
#
# creates items, identified by their unique ID, and makes a tile based on them.
# the tile is then checked to make sure the collision_rank assigned each
# feature is the same as the order of IDs passed in from the test.
class ItemList(object):
    """Helper for writing relative collision-order tests.

    Each call to append() queues a feature identified by a fresh unique ID
    and returns that ID. assert_order() then generates a tile from all the
    queued features and checks that the collision_rank assigned to each
    feature is strictly increasing in the order the IDs are passed in.
    """

    def __init__(self, test_instance, zoom=16, x=0, y=0):
        # test_instance is the FixtureTest providing generate_fixtures(),
        # tile() and the unittest assertion methods.
        self.test_instance = test_instance
        self.items = []
        self.id_counter = 1
        self.z = zoom
        self.x = x
        self.y = y

    def append(self, tags=None, source='openstreetmap.org', layer='pois',
               geom_type='point'):
        """Queue a feature with the given tags; return its unique ID.

        tags defaults to None rather than a mutable ``{}`` default, which
        would be a single dict shared across every call. The layer
        parameter is accepted for call-site symmetry but is not currently
        used by this method.
        """
        import dsl
        all_tags = {} if tags is None else tags.copy()
        all_tags['source'] = source
        if 'name' not in all_tags:
            all_tags['name'] = 'Some name'
        assert geom_type in ('point', 'line', 'polygon')
        if geom_type == 'point':
            shape = dsl.tile_centre_shape(self.z, self.x, self.y)
        elif geom_type == 'line':
            shape = dsl.tile_diagonal(self.z, self.x, self.y)
        elif geom_type == 'polygon':
            shape = dsl.tile_box(self.z, self.x, self.y)
        item_fid = self.id_counter
        self.id_counter += 1
        self.items.append(dsl.way(item_fid, shape, all_tags))
        return item_fid

    def assert_order(self, order):
        """Render the tile and assert ranks follow *order* (a list of IDs)."""
        self.test_instance.generate_fixtures(*self.items)
        items = {}
        with self.test_instance.tile(self.z, self.x, self.y) as layers:
            for layer_name, features in layers.iteritems():
                for feature in features:
                    fid = feature['properties']['id']
                    rank = feature['properties']['collision_rank']
                    # feature IDs should be unique across all layers.
                    assert fid not in items
                    items[fid] = rank
        self.test_instance.assertTrue(items, msg="Expected some items, but "
                                      "received an empty tile.")
        # note that we only get inside this "if" statement if we're in
        # "download only" mode, as it short-circuits the assertions.
        # otherwise a genuinely empty tile would have triggered the assertion
        # already.
        #
        # i'm really looking forward to the day when we remove all
        # non-generative fixtures, and we can remove this hack too!
        if not items:
            return
        rank = 0
        for item_fid in order:
            self.test_instance.assertTrue(
                item_fid in items, msg="Item %d missing from items seen in "
                "tile (%r), perhaps it wasn't correctly matched?"
                % (item_fid, items.keys()))
            item_rank = items[item_fid]
            # each item must rank strictly higher than the one before it.
            self.test_instance.assertTrue(
                item_rank > rank, msg="Item ranks lower than previous items "
                "in the list. (%d <= %d)" % (item_rank, rank))
            rank = item_rank
# a more robust way to do the tests: rather than check the exact value of the
# collision_rank, we can check that one kind has a rank value more or less than
# another. this is closer to a long term meaning of collision priority; that
# some features should be displayed in preference to others.
#
class CollisionOrderTest(FixtureTest):
    """Relative-order collision tests.

    Rather than pinning an exact collision_rank value, these tests only
    check that one kind ranks before another - which is the long-term
    meaning of collision priority anyway.
    """

    def test_fuel_before_police(self):
        # the exact ranks of fuel and police don't matter; what matters
        # is that fuel's rank is less than police's.
        features = ItemList(self)
        police_id = features.append(tags={'amenity': 'police'})
        fuel_id = features.append(tags={'amenity': 'fuel'})
        features.assert_order([fuel_id, police_id])
# we should only apply a collision_rank where there's a label, so the feature
# should either be a PONI (POI with no name) or a named feature. we also extend
# this to include shield text and ref.
class WhereTest(FixtureTest):
    """collision_rank should only appear where a label can be drawn.

    A feature qualifies when it is a PONI (POI with no name), has a name,
    or carries label-bearing properties such as ref or shield text.
    """

    def test_toilets(self):
        # toilets are PONIs - we want an icon on the map even when the
        # facilities aren't famous enough to have been given a name.
        import dsl
        z, x, y = 16, 0, 0
        fixture = dsl.way(1, dsl.tile_centre_shape(z, x, y), {
            'amenity': 'toilets',
            'source': 'openstreetmap.org',
        })
        self.generate_fixtures(fixture)
        self.assert_has_feature(z, x, y, 'pois', {
            'kind': 'toilets',
            'collision_rank': int,
        })

    def test_road_no_name_no_shield(self):
        # a road only needs a collision rank if it has some form of label:
        # a name, ref, shield_text or one of the shield text variants.
        # with none of those we still want the feature, just without the
        # collision_rank.
        import dsl
        z, x, y = 16, 0, 0
        fixture = dsl.way(1, dsl.tile_diagonal(z, x, y), {
            'highway': 'unclassified',
            'source': 'openstreetmap.org',
        })
        self.generate_fixtures(fixture)
        self.assert_has_feature(z, x, y, 'roads', {
            'kind': 'minor_road',
            'collision_rank': type(None),
        })

    def test_road_ref(self):
        # a ref alone - with neither a name nor shield text - is enough
        # to earn a collision_rank.
        import dsl
        z, x, y = 16, 0, 0
        fixture = dsl.way(1, dsl.tile_diagonal(z, x, y), {
            'highway': 'unclassified',
            'ref': '1',
            'source': 'openstreetmap.org',
        })
        self.generate_fixtures(fixture)
        self.assert_has_feature(z, x, y, 'roads', {
            'kind': 'minor_road',
            'name': type(None),
            'shield_text': type(None),
            'ref': '1',
            'collision_rank': int,
        })

    def test_road_shield_text(self):
        # shield text alone - with no name - is also enough to earn a
        # collision_rank.
        import dsl
        z, x, y = 16, 0, 0
        road = dsl.way(1, dsl.tile_diagonal(z, x, y), {
            'highway': 'motorway',
            'source': 'openstreetmap.org',
        })
        route = dsl.relation(1, {
            'network': 'US-I',
            'ref': '101',
            'type': 'route',
            'route': 'road',
        }, ways=[1])
        self.generate_fixtures(dsl.is_in('US', z, x, y), road, route)
        self.assert_has_feature(z, x, y, 'roads', {
            'kind': 'highway',
            'name': type(None),
            'shield_text': '101',
            'collision_rank': int,
        })
|
{
"content_hash": "4b248a1d16b5f7d7b3f98ca76d217a1f",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 79,
"avg_line_length": 31.366803278688526,
"alnum_prop": 0.5055203501665905,
"repo_name": "mapzen/vector-datasource",
"id": "710a7dafa549a24c38071bb9b7ea56f2532babe0",
"size": "15333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integration-test/988-add-collision-rank.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2839"
},
{
"name": "PLpgSQL",
"bytes": "32195"
},
{
"name": "Python",
"bytes": "268894"
},
{
"name": "SQLPL",
"bytes": "222"
},
{
"name": "Shell",
"bytes": "13283"
},
{
"name": "XSLT",
"bytes": "339"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.