text stringlengths 4 1.02M | meta dict |
|---|---|
"""Common utilities for the tests
"""
import time
import unittest
import random
random.seed()
import sys
import traceback
import pdb
from thingflow.base import IterableAsOutputThing, InputThing, FatalError,\
SensorEvent, Filter
class RandomSensor:
    """Sensor emitting Gaussian-distributed random samples.

    When stop_after_events is given the stream is finite and sample()
    raises StopIteration after that many values; otherwise samples are
    produced indefinitely.
    """
    def __init__(self, sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
        self.sensor_id = sensor_id
        self.mean = mean
        self.stddev = stddev
        self.stop_after_events = stop_after_events
        def finite():
            for _ in range(stop_after_events):
                yield random.gauss(mean, stddev)
        def endless():
            while True:
                yield random.gauss(mean, stddev)
        # Pick the appropriate generator once, at construction time.
        self.generator = endless() if stop_after_events is None else finite()

    def sample(self):
        """Return the next random value (StopIteration when exhausted)."""
        return next(self.generator)

    def __repr__(self):
        if self.stop_after_events is None:
            return 'RandomSensor(%s, mean=%s, stddev=%s)' % \
                (self.sensor_id, self.mean, self.stddev)
        return 'RandomSensor(%s, mean=%s, stddev=%s, stop_after_events=%s)' % \
            (self.sensor_id, self.mean, self.stddev, self.stop_after_events)
class ValueListSensor:
    """Sensor that replays a fixed sequence of values, one per sample()."""
    def __init__(self, sensor_id, values):
        self.sensor_id = sensor_id
        # Iterate the supplied values lazily; sample() raises StopIteration
        # once the sequence is exhausted.
        self.generator = iter(values)

    def sample(self):
        """Return the next value from the configured sequence."""
        return next(self.generator)

    def __repr__(self):
        return 'ValueListSensor(%s)' % self.sensor_id
def make_test_output_thing(sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
    """Example output thing emitting SensorEvents with random Gaussian values.

    The event stream is endless unless stop_after_events bounds it.
    """
    def finite_events():
        for _ in range(stop_after_events):
            yield SensorEvent(sensor_id, time.time(),
                              random.gauss(mean, stddev))
    def endless_events():
        while True:
            yield SensorEvent(sensor_id, time.time(),
                              random.gauss(mean, stddev))
    events = endless_events() if stop_after_events is None else finite_events()
    return IterableAsOutputThing(events, name='Sensor(%s)' % sensor_id)
def make_test_output_thing_from_vallist(sensor_id, values):
    """Create a output_thing that generates the list of values when sampled,
    stamping each event with a real wall-clock timestamp.
    """
    events = (SensorEvent(sensor_id, time.time(), v) for v in values)
    return IterableAsOutputThing(events, name='Sensor(%s)' % sensor_id)
class ValidationInputThing(InputThing):
    """Compare the values in a event stream to the expected values.
    Use the test_case for the assertions (for proper error reporting in a unit
    test).
    """
    def __init__(self, expected_stream, test_case,
                 extract_value_fn=lambda event: event.val):
        self.expected_stream = expected_stream
        self.next_idx = 0
        # test_case may be a TestCase instance or a bound test method.
        self.test_case = test_case
        self.extract_value_fn = extract_value_fn
        self.completed = False
        if isinstance(test_case, unittest.TestCase):
            self.name = "ValidationInputThing(%s)" % test_case.__class__.__name__
        else:
            self.name = "ValidationInputThing(%s.%s)" % (
                test_case.__self__.__class__.__name__,
                test_case.__name__)

    def _case(self):
        """Return the TestCase instance to run assertions against."""
        if isinstance(self.test_case, unittest.TestCase):
            return self.test_case
        return self.test_case.__self__

    def on_next(self, x):
        tc = self._case()
        tc.assertLess(self.next_idx, len(self.expected_stream),
                      "Got an event after reaching the end of the expected stream")
        expected = self.expected_stream[self.next_idx]
        tc.assertEqual(self.extract_value_fn(x), expected,
                       "Values for element %d of event stream mismatch" %
                       self.next_idx)
        self.next_idx += 1

    def on_completed(self):
        self._case().assertEqual(self.next_idx, len(self.expected_stream),
                                 "Got on_completed() before end of stream")
        self.completed = True

    def on_error(self, exc):
        self._case().assertTrue(False,
                                "Got an unexpected on_error call with parameter: %s" %
                                exc)

    def __repr__(self):
        return self.name
class SensorEventValidationInputThing(InputThing):
    """Compare the full events in a sensor event stream to the expected events.
    Use the test_case for the assertions (for proper error reporting in a unit
    test).
    """
    def __init__(self, expected_sensor_events, test_case):
        self.expected_sensor_events = expected_sensor_events
        self.next_idx = 0
        self.test_case = test_case
        self.completed = False

    def on_next(self, x):
        tc = self.test_case
        idx = self.next_idx
        tc.assertLess(idx, len(self.expected_sensor_events),
                      "Got an event after reaching the end of the expected stream")
        expected = self.expected_sensor_events[idx]
        tc.assertEqual(x.val, expected.val,
                       "Values for element %d of event stream mismatch" % idx)
        tc.assertEqual(x.sensor_id, expected.sensor_id,
                       "sensor ids for element %d of event stream mismatch" % idx)
        # since the timestamp is a floating point number, we only check that
        # the timestamps are "close enough"
        tc.assertAlmostEqual(x.ts, expected.ts, places=5,
                             msg="Timestamps for element %d of event stream mismatch" % idx)
        self.next_idx = idx + 1

    def on_completed(self):
        self.test_case.assertEqual(self.next_idx, len(self.expected_sensor_events),
                                   "Got on_completed() before end of stream")
        self.completed = True

    def on_error(self, exc):
        self.test_case.assertTrue(False,
                                  "Got an unexpected on_error call with parameter: %s" % exc)
class ValidateAndStopInputThing(ValidationInputThing):
    """A version of ValidationInputThing that calls a stop
    function after the specified events have been received.
    """
    def __init__(self, expected_stream, test_case, stop_fn,
                 extract_value_fn=lambda event: event.val):
        super().__init__(expected_stream, test_case,
                         extract_value_fn=extract_value_fn)
        self.stop_fn = stop_fn

    def on_next(self, x):
        super().on_next(x)
        # Once the whole expected stream has been seen, shut things down.
        remaining = len(self.expected_stream) - self.next_idx
        if remaining == 0:
            print("ValidateAndStopInputThing: stopping")
            self.stop_fn()
class CaptureInputThing(InputThing):
    """Capture the sequence of events in a list for later use."""
    def __init__(self, expecting_error=False):
        self.events = []          # every event seen so far, in order
        self.completed = False    # set once on_completed() fires
        self.expecting_error = expecting_error
        self.errored = False      # set if an anticipated error arrived

    def on_next(self, x):
        self.events.append(x)

    def on_completed(self):
        self.completed = True

    def on_error(self, e):
        if not self.expecting_error:
            raise FatalError("Should not get on_error, got on_error(%s)" % e)
        self.errored = True
class StopAfterN(Filter):
    """Filter to call a stop function after N events.
    Usually, the stop function is the deschedule function for an upstream sensor.
    """
    def __init__(self, previous_in_chain, stop_fn, N=5):
        super().__init__(previous_in_chain)
        self.stop_fn = stop_fn
        self.N = N
        assert N > 0
        self.count = 0

    def on_next(self, x):
        # Forward the event downstream before counting it.
        self._dispatch_next(x)
        self.count += 1
        if self.count == self.N:
            print("stopping after %d events" % self.N)
            self.stop_fn()
def trace_on_error(f):
    """Decorator helpful when debugging. Will put the decorated function/method
    into the debugger when an exception is thrown.

    The wrapped function's metadata (__name__, __doc__, ...) is preserved
    via functools.wraps.
    """
    import functools

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            # Print the traceback, then drop into the post-mortem debugger.
            info = sys.exc_info()
            traceback.print_exception(*info)
            pdb.post_mortem(info[2])
            # NOTE: the exception is deliberately swallowed after the pdb
            # session ends (returns None), matching the original behavior.
    return wrapper
| {
"content_hash": "1eac938fda96a292e76d1f0af80e7ab3",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 102,
"avg_line_length": 36.50833333333333,
"alnum_prop": 0.5889066423191053,
"repo_name": "mpi-sws-rse/thingflow-python",
"id": "08c2d3e3ea9ba873be2d07935bff6de45dbf1806",
"size": "8854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "290455"
},
{
"name": "Shell",
"bytes": "6604"
}
],
"symlink_target": ""
} |
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class OtsuThresholdSegmentationInputSpec(CommandLineInputSpec):
    # Input traits for the Slicer "OtsuThresholdSegmentation" CLI module.
    # Each trait's ``argstr`` is the command-line flag the value renders to.
    # (Autogenerated file: fix the generator rather than hand-editing.)
    brightObjects = traits.Bool(desc="Segmenting bright objects on a dark background or dark objects on a bright background.", argstr="--brightObjects ")
    numberOfBins = traits.Int(desc="This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. Experimentation is the only way to see the effect of varying this parameter.", argstr="--numberOfBins %d")
    faceConnected = traits.Bool(desc="This is an advanced parameter. Adjacent voxels are face connected. This affects the connected component algorithm. If this parameter is false, more regions are likely to be identified.", argstr="--faceConnected ")
    minimumObjectSize = traits.Int(desc="Minimum size of object to retain. This parameter can be used to get rid of small regions in noisy images.", argstr="--minimumObjectSize %d")
    # Positional arguments: the input volume is second-to-last on the
    # command line, the output volume is last.
    inputVolume = File(position=-2, desc="Input volume to be segmented", exists=True, argstr="%s")
    outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s")
class OtsuThresholdSegmentationOutputSpec(TraitedSpec):
    # The labeled volume produced by the CLI tool (last positional arg).
    outputVolume = File(position=-1, desc="Output filtered", exists=True)
class OtsuThresholdSegmentation(SEMLikeCommandLine):
    """title: Otsu Threshold Segmentation
category: Legacy.Segmentation
description: This filter creates a labeled image from a grayscale image. First, it calculates an optimal threshold that separates the image into foreground and background. This threshold separates those two classes so that their intra-class variance is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter runs a connected component algorithm to generate unique labels for each connected region of the foreground. Finally, the resulting image is relabeled to provide consecutive numbering.
version: 1.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdSegmentation
contributor: Bill Lorensen (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
    # Wiring for the SEM-like command-line wrapper; the docstring above is
    # generated module metadata and is rendered by nipype as interface help.
    input_spec = OtsuThresholdSegmentationInputSpec
    output_spec = OtsuThresholdSegmentationOutputSpec
    # Trailing space is part of the generated command string.
    _cmd = "OtsuThresholdSegmentation "
    _outputs_filenames = {'outputVolume': 'outputVolume.nii'}
| {
"content_hash": "fbd76d3588cccf28a0c5e6f26f33f944",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 512,
"avg_line_length": 71.90243902439025,
"alnum_prop": 0.7917232021709634,
"repo_name": "iglpdc/nipype",
"id": "af724c9f96cdf6e2af6032b01301300a2995ffc6",
"size": "2971",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "nipype/interfaces/slicer/legacy/segmentation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2106"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "4458175"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import sys
from decimal import Decimal, getcontext
def load_num():
    """Return the next non-blank line from stdin as an int.

    Blank lines (empty or lone newline) between test cases are skipped.
    """
    line = sys.stdin.readline()
    while line in ('', '\n'):
        line = sys.stdin.readline()
    return int(line.rstrip())
if __name__ == '__main__':
    # This problem is trivial using python's arbitrary precision integers.
    # A C implementation would require rolling your own big-integer type
    # with a few operations, then approximating the solution with a
    # numerical method such as bisection or Newton's method:
    # https://en.wikipedia.org/wiki/Root-finding_algorithm
    # https://en.wikipedia.org/wiki/Bisection_method
    # https://en.wikipedia.org/wiki/Newton%27s_method
    n_cases = load_num()
    getcontext().prec = 2000  # plenty of digits for the judge's inputs
    for case_idx in range(n_cases):
        value = load_num()
        print(int(Decimal(value).sqrt()))
        # Blank line between consecutive cases, but not after the last one.
        if case_idx + 1 < n_cases:
            print('')
| {
"content_hash": "ca35032accabbd7a813d37071376a5aa",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 28.06451612903226,
"alnum_prop": 0.628735632183908,
"repo_name": "secnot/uva-onlinejudge-solutions",
"id": "20cadf8e47f6f7aa128434acc386414349ab967d",
"size": "870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "10023 - Square root/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7894"
},
{
"name": "Python",
"bytes": "108568"
}
],
"symlink_target": ""
} |
"""
Sensors of a KNX Device.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/knx/
"""
from homeassistant.const import (TEMP_CELSIUS, TEMPERATURE, CONF_TYPE,
ILLUMINANCE, SPEED_MS, CONF_MINIMUM,
CONF_MAXIMUM)
from homeassistant.components.knx import (KNXConfig, KNXGroupAddress)
DEPENDENCIES = ["knx"]
# Speed units
SPEED_METERPERSECOND = "m/s" # type: str
# Illuminance units
ILLUMINANCE_LUX = "lx" # type: str
# Predefined Minimum, Maximum Values for Sensors
# Temperature as defined in KNX Standard 3.10 - 9.001 DPT_Value_Temp
KNX_TEMP_MIN = -273
KNX_TEMP_MAX = 670760
# Luminance(LUX) as Defined in KNX Standard 3.10 - 9.004 DPT_Value_Lux
KNX_LUX_MIN = 0
KNX_LUX_MAX = 670760
# Speed m/s as defined in KNX Standard 3.10 - 9.005 DPT_Value_Wsp
KNX_SPEED_MS_MIN = 0
KNX_SPEED_MS_MAX = 670760
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Setup the KNX Sensor platform."""
    # Map each supported sensor type to its display unit and the value
    # range defined by the KNX standard (3.10 datapoints 9.001 / 9.005 /
    # 9.004 respectively).
    sensor_types = {
        TEMPERATURE: (TEMP_CELSIUS, KNX_TEMP_MIN, KNX_TEMP_MAX),
        SPEED_MS: (SPEED_METERPERSECOND, KNX_SPEED_MS_MIN, KNX_SPEED_MS_MAX),
        ILLUMINANCE: (ILLUMINANCE_LUX, KNX_LUX_MIN, KNX_LUX_MAX),
    }
    sensor_type = config[CONF_TYPE]
    if sensor_type not in sensor_types:
        # Unknown type: add no entities (same as the original elif chain).
        return
    unit, default_min, default_max = sensor_types[sensor_type]
    minimum_value, maximum_value = update_and_define_min_max(
        config, default_min, default_max)
    add_entities([
        KNXSensorFloatClass(hass, KNXConfig(config), unit,
                            minimum_value, maximum_value)
    ])
def update_and_define_min_max(config, minimum_default,
                              maximum_default):
    """Function help determinate a min/max value defined in config.

    Values explicitly set in the config override the KNX-standard
    defaults; otherwise the defaults are returned.
    """
    # Compare against None instead of relying on truthiness, so that an
    # explicitly configured value of 0 (a legal minimum, e.g. for lux or
    # wind speed) is honored rather than silently replaced by the default.
    minimum_value = config.get(CONF_MINIMUM)
    if minimum_value is None:
        minimum_value = minimum_default
    maximum_value = config.get(CONF_MAXIMUM)
    if maximum_value is None:
        maximum_value = maximum_default
    return minimum_value, maximum_value
class KNXSensorBaseClass():
    """Sensor Base Class for all KNX Sensors."""

    @property
    def cache(self):
        """Sensor values must always be read fresh, never cached."""
        return False
class KNXSensorFloatClass(KNXGroupAddress, KNXSensorBaseClass):
    """
    Base Implementation of a 2byte Floating Point KNX Telegram.
    Defined in KNX 3.7.2 - 3.10
    """

    def __init__(self, hass, config, unit_of_measurement, minimum_sensor_value,
                 maximum_sensor_value):
        """Initialize a KNX Float Sensor."""
        self._unit_of_measurement = unit_of_measurement
        self._minimum_value = minimum_sensor_value
        self._maximum_value = maximum_sensor_value
        self._value = None
        KNXGroupAddress.__init__(self, hass, config)

    @property
    def state(self):
        """Return the Value of the KNX Sensor."""
        return self._value

    @property
    def unit_of_measurement(self):
        """Return the defined Unit of Measurement for the KNX Sensor."""
        return self._unit_of_measurement

    def update(self):
        """Update KNX sensor."""
        from knxip.conversion import knx2_to_float
        super().update()
        self._value = None
        if not self._data:
            return
        value = knx2_to_float(self._data)
        # Discard readings outside the configured legal range.
        if self._minimum_value <= value <= self._maximum_value:
            self._value = value
| {
"content_hash": "225a2e5992b1736549dba34d290e8cbe",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 31.313432835820894,
"alnum_prop": 0.6151096282173498,
"repo_name": "robjohnson189/home-assistant",
"id": "3dce95f768886705bbceef57fa06ab81affc5913",
"size": "4196",
"binary": false,
"copies": "12",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/knx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1362685"
},
{
"name": "Python",
"bytes": "3499625"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
from decimal import Decimal
from trailer.model.fix import Fix
from trailer.model.fieldtools import nullable, make_list, make_time
class Waypoint:
    """An immutable waypoint: position, time and GPS-quality metadata.

    All values are validated on construction; a value outside its legal
    range raises ValueError. Optional attributes default to None (or an
    empty list for links/extensions).
    """

    def __init__(self, latitude, longitude, elevation=None, time=None,
                 magvar=None, geoid_height=None, name=None, comment=None,
                 description=None, source=None, links=None, symbol=None,
                 classification=None, fix=None, num_satellites=None,
                 hdop=None, vdop=None, pdop=None,
                 seconds_since_dgps_update=None, dgps_station_type=None,
                 speed=None, course=None,
                 extensions=None):
        self._latitude = Decimal(latitude)
        if not -90 <= self._latitude <= +90:
            # BUG FIX: these range-error messages previously contained an
            # unfilled {0} placeholder (no .format() call); the offending
            # value is now interpolated into each message.
            raise ValueError("Latitude {0} not in range -90 <= latitude <= +90"
                             .format(self._latitude))
        self._longitude = Decimal(longitude)
        if not -180 <= self._longitude < +180:
            raise ValueError("Longitude {0} not in range -180 <= longitude < +180"
                             .format(self._longitude))
        self._elevation = nullable(Decimal)(elevation)
        self._time = nullable(make_time)(time)
        self._magvar = nullable(Decimal)(magvar)
        if self._magvar is not None:
            if not 0 <= self._magvar < 360:
                raise ValueError("Magnetic variation {0} not in range 0 <= magvar < 360"
                                 .format(self._magvar))
        self._geoid_height = nullable(Decimal)(geoid_height)
        self._name = nullable(str)(name)
        self._comment = nullable(str)(comment)
        self._description = nullable(str)(description)
        self._source = nullable(str)(source)
        self._links = make_list(links)
        self._symbol = nullable(str)(symbol)
        self._classification = nullable(str)(classification)
        self._fix = nullable(Fix)(fix)
        self._num_satellites = nullable(int)(num_satellites)
        if self._num_satellites is not None:
            if self._num_satellites < 0:
                raise ValueError("Number of satellites {0} cannot be negative"
                                 .format(self._num_satellites))
        self._hdop = nullable(Decimal)(hdop)
        self._vdop = nullable(Decimal)(vdop)
        self._pdop = nullable(Decimal)(pdop)
        self._seconds_since_dgps_update = nullable(Decimal)(seconds_since_dgps_update)
        self._dgps_station_type = nullable(int)(dgps_station_type)
        if self._dgps_station_type is not None:
            if not 0 <= self._dgps_station_type <= 1023:
                raise ValueError("DGPS station type {0} not in range 0 <= dgps_station_type <= 1023"
                                 .format(self._dgps_station_type))
        self._speed = nullable(Decimal)(speed)
        self._course = nullable(Decimal)(course)
        if self._course is not None:
            if not 0 <= self._course < 360:
                raise ValueError("Course {0} not in range 0 <= course < 360"
                                 .format(self._course))
        self._extensions = make_list(extensions)

    # Read-only accessors for the validated attributes.

    @property
    def latitude(self):
        return self._latitude

    @property
    def longitude(self):
        return self._longitude

    @property
    def elevation(self):
        return self._elevation

    @property
    def time(self):
        return self._time

    @property
    def magvar(self):
        return self._magvar

    @property
    def geoid_height(self):
        return self._geoid_height

    @property
    def name(self):
        return self._name

    @property
    def comment(self):
        return self._comment

    @property
    def description(self):
        return self._description

    @property
    def source(self):
        return self._source

    @property
    def links(self):
        return self._links

    @property
    def symbol(self):
        return self._symbol

    @property
    def classification(self):
        return self._classification

    @property
    def fix(self):
        return self._fix

    @property
    def num_satellites(self):
        return self._num_satellites

    @property
    def hdop(self):
        return self._hdop

    @property
    def vdop(self):
        return self._vdop

    @property
    def pdop(self):
        return self._pdop

    @property
    def seconds_since_dgps_update(self):
        return self._seconds_since_dgps_update

    @property
    def dgps_station_type(self):
        return self._dgps_station_type

    @property
    def speed(self):
        return self._speed

    @property
    def course(self):
        return self._course

    @property
    def extensions(self):
        return self._extensions
"content_hash": "5292df23d3d8f6e8bce92e8ea1ad5703",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 101,
"avg_line_length": 27.88535031847134,
"alnum_prop": 0.6000456829602558,
"repo_name": "rob-smallshire/trailer",
"id": "e212429aee5838daa812a2c9bd905515e7742e3f",
"size": "4378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trailer/model/waypoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50677"
}
],
"symlink_target": ""
} |
"""
Quick n dirty eigen3 detection
"""
import os, glob, types
import Options, Configure
def detect_eigen3(conf):
    """Locate the Eigen3 headers and record the result in conf.env.

    Returns 1 when Eigen/Core was found (and EIGEN3_FOUND is set to True),
    0 when eigen3 is disabled via --no-eigen3 or the header is missing.
    """
    # Removed unused locals (`env`, `opt`): conf.env and Options.options
    # are referenced directly below.
    conf.env['LIB_EIGEN3'] = ''
    conf.env['EIGEN3_FOUND'] = False
    if Options.options.no_eigen3:
        return 0
    if Options.options.eigen3:
        # An explicit install prefix was supplied on the command line.
        conf.env['CPPPATH_EIGEN3'] = [Options.options.eigen3]
        conf.env['LIBPATH_EIGEN3'] = [Options.options.eigen3]
    else:
        # Fall back to the usual system locations.
        conf.env['CPPPATH_EIGEN3'] = ['/usr/include/eigen3', '/usr/local/include/eigen3', '/usr/include', '/usr/local/include']
        conf.env['LIBPATH_EIGEN3'] = ['/usr/lib', '/usr/local/lib']
    res = Configure.find_file('Eigen/Core', conf.env['CPPPATH_EIGEN3'])
    conf.check_message('header', 'Eigen/Core', (res != ''), res)
    if res == '':
        return 0
    conf.env['EIGEN3_FOUND'] = True
    return 1
def detect(conf):
    """Waf detection hook: delegates to detect_eigen3."""
    return detect_eigen3(conf)
def set_options(opt):
    """Register the eigen3-related command-line options with waf."""
    # NOTE(review): '--no-eigen3' is declared type='string' although
    # detect_eigen3 treats it as a boolean flag (any non-empty value
    # disables eigen3). Confirm whether action='store_true' was intended.
    opt.add_option('--eigen3', type='string', help='path to eigen3', dest='eigen3')
    opt.add_option('--no-eigen3', type='string', help='disable eigen3', dest='no_eigen3')
| {
"content_hash": "000140eee06293508b580b59082b9bd0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 121,
"avg_line_length": 29.514285714285716,
"alnum_prop": 0.6747337850919651,
"repo_name": "Evolving-AI-Lab/innovation-engine",
"id": "19787cea237f95ad1b9781dd8add143f51413bad",
"size": "1094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sferes/eigen3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7105"
},
{
"name": "C++",
"bytes": "3242109"
},
{
"name": "CMake",
"bytes": "96949"
},
{
"name": "CSS",
"bytes": "9155"
},
{
"name": "Cuda",
"bytes": "125609"
},
{
"name": "HTML",
"bytes": "7048"
},
{
"name": "Makefile",
"bytes": "20722"
},
{
"name": "Matlab",
"bytes": "20561"
},
{
"name": "Protocol Buffer",
"bytes": "38374"
},
{
"name": "Python",
"bytes": "364321"
},
{
"name": "Ruby",
"bytes": "2544"
},
{
"name": "Shell",
"bytes": "53469"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from wagtail.admin.tests.pages.timestamps import local_datetime
from wagtail.core.models import Page
from wagtail.search.index import SearchField
from wagtail.tests.testapp.models import SimplePage, SingleEventPage
from wagtail.tests.utils import WagtailTestUtils
class TestPageSearch(TestCase, WagtailTestUtils):
    """Tests for the admin page-search view (``wagtailadmin_pages:search``)."""

    def setUp(self):
        # Log in as a superuser; self.user is reused by the permission test.
        self.user = self.login()

    def get(self, params=None, **extra):
        """Issue a GET against the search view with the given query params."""
        return self.client.get(reverse('wagtailadmin_pages:search'), params or {}, **extra)

    def test_view(self):
        """The search page renders even without a query string."""
        response = self.get()
        self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
        self.assertEqual(response.status_code, 200)

    def test_search(self):
        """A query is accepted and echoed back in the template context."""
        response = self.get({'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
        self.assertEqual(response.context['query_string'], "Hello")

    def test_search_searchable_fields(self):
        """Adding a field to Page.search_fields makes it searchable."""
        # Find root page
        root_page = Page.objects.get(id=2)
        # Create a page
        root_page.add_child(instance=SimplePage(
            title="Hi there!", slug='hello-world', content="good morning",
            live=True,
            has_unpublished_changes=False,
        ))
        # Confirm the slug is not being searched
        response = self.get({'q': "hello"})
        self.assertNotContains(response, "There is one matching page")
        search_fields = Page.search_fields
        # Add slug to the search_fields
        Page.search_fields = Page.search_fields + [SearchField('slug', partial_match=True)]
        # Confirm the slug is being searched
        response = self.get({'q': "hello"})
        self.assertContains(response, "There is one matching page")
        # Reset the search fields
        Page.search_fields = search_fields

    def test_ajax(self):
        """AJAX requests get the results partial, not the full search page."""
        response = self.get({'q': "Hello"}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateNotUsed(response, 'wagtailadmin/pages/search.html')
        self.assertTemplateUsed(response, 'wagtailadmin/pages/search_results.html')
        self.assertEqual(response.context['query_string'], "Hello")

    def test_pagination(self):
        """Out-of-range or invalid page numbers must not 500."""
        pages = ['0', '1', '-1', '9999', 'Not a page']
        for page in pages:
            response = self.get({'q': "Hello", 'p': page})
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')

    def test_root_can_appear_in_search_results(self):
        """The root page itself is searchable."""
        response = self.get({'q': "roo"})
        self.assertEqual(response.status_code, 200)
        # 'pages' list in the response should contain root
        results = response.context['pages']
        self.assertTrue(any([r.slug == 'root' for r in results]))

    def test_search_uses_admin_display_title_from_specific_class(self):
        # SingleEventPage has a custom get_admin_display_title method; explorer should
        # show the custom title rather than the basic database one
        root_page = Page.objects.get(id=2)
        new_event = SingleEventPage(
            title="Lunar event",
            location='the moon', audience='public',
            cost='free', date_from='2001-01-01',
            latest_revision_created_at=local_datetime(2016, 1, 1)
        )
        root_page.add_child(instance=new_event)
        response = self.get({'q': "lunar"})
        self.assertContains(response, "Lunar event (single event)")

    def test_search_no_perms(self):
        """Users with bare admin access (no page perms) are redirected away."""
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()
        self.assertRedirects(self.get(), '/admin/')

    def test_search_order_by_title(self):
        """'ordering=title' / '-title' sort results alphabetically."""
        root_page = Page.objects.get(id=2)
        new_event = SingleEventPage(
            title="Lunar event",
            location='the moon', audience='public',
            cost='free', date_from='2001-01-01',
            latest_revision_created_at=local_datetime(2016, 1, 1)
        )
        root_page.add_child(instance=new_event)
        new_event_2 = SingleEventPage(
            title="A Lunar event",
            location='the moon', audience='public',
            cost='free', date_from='2001-01-01',
            latest_revision_created_at=local_datetime(2016, 1, 1)
        )
        root_page.add_child(instance=new_event_2)
        response = self.get({'q': 'Lunar', 'ordering': 'title'})
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [new_event_2.id, new_event.id])
        response = self.get({'q': 'Lunar', 'ordering': '-title'})
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [new_event.id, new_event_2.id])

    def test_search_order_by_updated(self):
        """'ordering=latest_revision_created_at' sorts by last update time."""
        root_page = Page.objects.get(id=2)
        new_event = SingleEventPage(
            title="Lunar event",
            location='the moon', audience='public',
            cost='free', date_from='2001-01-01',
            latest_revision_created_at=local_datetime(2016, 1, 1)
        )
        root_page.add_child(instance=new_event)
        new_event_2 = SingleEventPage(
            title="Lunar event 2",
            location='the moon', audience='public',
            cost='free', date_from='2001-01-01',
            latest_revision_created_at=local_datetime(2015, 1, 1)
        )
        root_page.add_child(instance=new_event_2)
        response = self.get({'q': 'Lunar', 'ordering': 'latest_revision_created_at'})
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [new_event_2.id, new_event.id])
        response = self.get({'q': 'Lunar', 'ordering': '-latest_revision_created_at'})
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [new_event.id, new_event_2.id])

    def test_search_order_by_status(self):
        """'ordering=live' / '-live' sort by draft/live status."""
        root_page = Page.objects.get(id=2)
        live_event = SingleEventPage(
            title="Lunar event",
            location='the moon', audience='public',
            cost='free', date_from='2001-01-01',
            latest_revision_created_at=local_datetime(2016, 1, 1),
            live=True
        )
        root_page.add_child(instance=live_event)
        draft_event = SingleEventPage(
            title="Lunar event",
            location='the moon', audience='public',
            cost='free', date_from='2001-01-01',
            latest_revision_created_at=local_datetime(2016, 1, 1),
            live=False
        )
        root_page.add_child(instance=draft_event)
        response = self.get({'q': 'Lunar', 'ordering': 'live'})
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [draft_event.id, live_event.id])
        response = self.get({'q': 'Lunar', 'ordering': '-live'})
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [live_event.id, draft_event.id])

    def test_search_filter_content_type(self):
        """A valid content_type filter is accepted; an invalid one 404s."""
        # Correct content_type
        response = self.get({'content_type': "demosite.standardpage"})
        self.assertEqual(response.status_code, 200)
        # Incorrect content_type
        response = self.get({'content_type': "demosite.standardpage.error"})
        self.assertEqual(response.status_code, 404)
| {
"content_hash": "abe961fa43cab7f49f0003df1f167646",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 99,
"avg_line_length": 41.38709677419355,
"alnum_prop": 0.6208106001558846,
"repo_name": "torchbox/wagtail",
"id": "61d47109864ff0da274e475e419b99231f31cf62",
"size": "7698",
"binary": false,
"copies": "5",
"ref": "refs/heads/stable/2.15.x",
"path": "wagtail/admin/tests/pages/test_page_search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "178240"
},
{
"name": "HTML",
"bytes": "307456"
},
{
"name": "JavaScript",
"bytes": "123792"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "2786743"
},
{
"name": "Shell",
"bytes": "7997"
}
],
"symlink_target": ""
} |
"""
"""
from abc import ABCMeta, abstractmethod
from typing import AsyncGenerator, Callable, Iterable, Optional, Sequence
from prompt_toolkit.document import Document
from prompt_toolkit.eventloop import generator_to_async_generator
from prompt_toolkit.formatted_text import AnyFormattedText, StyleAndTextTuples
__all__ = [
"Completion",
"Completer",
"ThreadedCompleter",
"DummyCompleter",
"DynamicCompleter",
"CompleteEvent",
"merge_completers",
"get_common_complete_suffix",
]
class Completion:
    """
    A single completion suggestion.

    :param text: String that will be inserted into the document.
    :param start_position: Position relative to the cursor where the new
        text starts; must be <= 0. The text replaces everything between
        start_position and the original cursor position.
    :param display: (optional string or formatted text) Alternative text to
        show in the completion menu instead of `text`.
    :param display_meta: (optional string or formatted text) Extra
        information about the completion, e.g. the path or source it comes
        from. May also be a callable returning a string.
    :param style: Style string.
    :param selected_style: Style string used when the completion is
        selected; overrides `style`.
    """

    def __init__(
        self,
        text: str,
        start_position: int = 0,
        display: Optional[AnyFormattedText] = None,
        display_meta: Optional[AnyFormattedText] = None,
        style: str = "",
        selected_style: str = "",
    ) -> None:
        from prompt_toolkit.formatted_text import to_formatted_text

        self.text = text
        self.start_position = start_position
        self._display_meta = display_meta

        # Fall back to the inserted text when no display text was given.
        self.display = to_formatted_text(text if display is None else display)

        self.style = style
        self.selected_style = selected_style

        assert self.start_position <= 0

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        if isinstance(self.display, str) and self.display == self.text:
            return f"{cls_name}(text={self.text!r}, start_position={self.start_position!r})"
        return (
            f"{cls_name}(text={self.text!r}, "
            f"start_position={self.start_position!r}, display={self.display!r})"
        )

    def _key(self):
        # The attributes that determine equality and the hash value.
        return (self.text, self.start_position, self.display, self._display_meta)

    def __eq__(self, other: object) -> bool:
        return isinstance(other, Completion) and self._key() == other._key()

    def __hash__(self) -> int:
        return hash(self._key())

    @property
    def display_text(self) -> str:
        " The 'display' field as plain text. "
        from prompt_toolkit.formatted_text import fragment_list_to_text

        return fragment_list_to_text(self.display)

    @property
    def display_meta(self) -> StyleAndTextTuples:
        " Return meta-text. (This is lazy when using a callable). "
        from prompt_toolkit.formatted_text import to_formatted_text

        return to_formatted_text(self._display_meta or "")

    @property
    def display_meta_text(self) -> str:
        " The 'meta' field as plain text. "
        from prompt_toolkit.formatted_text import fragment_list_to_text

        return fragment_list_to_text(self.display_meta)

    def new_completion_from_position(self, position: int) -> "Completion":
        """
        (Only for internal use!)

        Split this completion at `position`: return a copy whose text starts
        at that position. Used by `Application` when it needs new completions
        after inserting the common prefix.
        """
        offset = position - self.start_position
        assert offset >= 0
        return Completion(
            text=self.text[offset:],
            display=self.display,
            display_meta=self._display_meta,
        )
class CompleteEvent:
    """
    Describes why the completer was invoked.

    :param text_inserted: True when completions are requested because text was
        just inserted. (`Buffer.complete_while_typing`.)
    :param completion_requested: True when the user explicitly pressed the
        `Tab` key in order to view the completions.

    At most one of the two flags is set. A completer can use them, for
    instance, to show some completions only when ``Tab`` has been pressed but
    not automatically when the user presses a space (because of
    `complete_while_typing`).
    """

    def __init__(
        self, text_inserted: bool = False, completion_requested: bool = False
    ) -> None:
        # The two triggers are mutually exclusive.
        assert not (text_inserted and completion_requested)

        #: Automatic completion while typing.
        self.text_inserted = text_inserted
        #: Completion explicitly requested by pressing 'tab'.
        self.completion_requested = completion_requested

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}"
            f"(text_inserted={self.text_inserted!r}, "
            f"completion_requested={self.completion_requested!r})"
        )
class Completer(metaclass=ABCMeta):
    """
    Base class for completer implementations.
    """

    @abstractmethod
    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        """
        This should be a generator that yields :class:`.Completion` instances.

        If the generation of completions is something expensive (that takes a
        lot of time), consider wrapping this `Completer` class in a
        `ThreadedCompleter`. In that case, the completer algorithm runs in a
        background thread and completions will be displayed as soon as they
        arrive.

        :param document: :class:`~prompt_toolkit.document.Document` instance.
        :param complete_event: :class:`.CompleteEvent` instance.
        """
        # Empty-generator body: `yield` is never executed, but its presence
        # makes this abstract method itself a generator function, so
        # iterating a (mistaken) direct call yields nothing instead of
        # failing with a TypeError.
        while False:
            yield

    async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        """
        Asynchronous generator for completions. (Probably, you won't have to
        override this.)

        Asynchronous generator of :class:`.Completion` objects.
        """
        # Default implementation: adapt the synchronous generator by yielding
        # its items one by one. No actual concurrency is introduced here;
        # subclasses (e.g. ThreadedCompleter) override this for that.
        for item in self.get_completions(document, complete_event):
            yield item
class ThreadedCompleter(Completer):
    """
    Wrapper that runs another completer's `get_completions` generator in a
    background thread.

    Use this to keep the user interface responsive when producing completions
    takes too much time: completions are displayed as soon as they are
    produced, and the user can already select one before the full list is
    available.
    """

    def __init__(self, completer: Completer) -> None:
        self.completer = completer

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Synchronous path: delegate directly to the wrapped completer.
        return self.completer.get_completions(document, complete_event)

    async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        """
        Asynchronous generator of completions.
        """
        def run_wrapped_completer() -> Iterable[Completion]:
            return self.completer.get_completions(document, complete_event)

        async for completion in generator_to_async_generator(
            run_wrapped_completer
        ):
            yield completion

    def __repr__(self) -> str:
        return "ThreadedCompleter(%r)" % (self.completer,)
class DummyCompleter(Completer):
    """
    Completer implementation that never produces any completion.
    """

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Nothing to offer, ever.
        return []

    def __repr__(self) -> str:
        return "DummyCompleter()"
class DynamicCompleter(Completer):
    """
    Completer that delegates to whatever completer `get_completer` returns at
    call time, falling back to a `DummyCompleter` when it returns `None`.

    :param get_completer: Callable that returns a :class:`.Completer` instance.
    """

    def __init__(self, get_completer: Callable[[], Optional[Completer]]) -> None:
        self.get_completer = get_completer

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        delegate = self.get_completer() or DummyCompleter()
        return delegate.get_completions(document, complete_event)

    async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        delegate = self.get_completer() or DummyCompleter()
        async for completion in delegate.get_completions_async(
            document, complete_event
        ):
            yield completion

    def __repr__(self) -> str:
        return "DynamicCompleter(%r -> %r)" % (self.get_completer, self.get_completer())
class _MergedCompleter(Completer):
    """
    Completer that concatenates the completions of several child completers.
    """

    def __init__(self, completers: Sequence[Completer]) -> None:
        self.completers = completers

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Query each child completer in turn, in a blocking way.
        for completer in self.completers:
            yield from completer.get_completions(document, complete_event)

    async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        # Query each child completer in turn, asynchronously.
        for completer in self.completers:
            async for item in completer.get_completions_async(
                document, complete_event
            ):
                yield item
def merge_completers(completers: Sequence[Completer]) -> _MergedCompleter:
    """
    Combine several completers into a single completer that yields the
    completions of every given completer, in order.
    """
    merged = _MergedCompleter(completers)
    return merged
def get_common_complete_suffix(
    document: Document, completions: Sequence[Completion]
) -> str:
    """
    Return the common prefix of the text that all given completions would
    append after the cursor. Returns "" as soon as any completion would
    rewrite text before the cursor.
    """

    def keeps_text_before_cursor(completion: Completion) -> bool:
        # The leading part of `completion.text` overlaps the existing input;
        # it must match what is already there.
        overlap = completion.text[: -completion.start_position]
        return document.text_before_cursor.endswith(overlap)

    # When at least one completion changes the text before the cursor, there
    # is no common part to insert.
    if not all(keeps_text_before_cursor(c) for c in completions):
        return ""

    def appended_part(completion: Completion) -> str:
        return completion.text[-completion.start_position :]

    return _commonprefix([appended_part(c) for c in completions])
def _commonprefix(strings: Iterable[str]) -> str:
# Similar to os.path.commonprefix
if not strings:
return ""
else:
s1 = min(strings)
s2 = max(strings)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
| {
"content_hash": "13732c39f024b8e5167fd85797122221",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 88,
"avg_line_length": 32.99140401146132,
"alnum_prop": 0.6405245787736669,
"repo_name": "jonathanslenders/python-prompt-toolkit",
"id": "821cc5b7254641f93a4349974fa8fae20e3ce366",
"size": "11514",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "prompt_toolkit/completion/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1191674"
}
],
"symlink_target": ""
} |
import dwolla
import urlparse
import json
import webapp2
from webapp2_extras import sessions
from webapp2_extras import jinja2
from google.appengine.ext import db
from webapp2 import RequestHandler, WSGIApplication
class BaseHandler(RequestHandler):
    """Base request handler: wires up webapp2 sessions, Jinja2 template
    rendering and a Dwolla API client for every handler in this app."""

    def dispatch(self):
        # Load the session store up front and make sure any session mutations
        # are persisted even when the handler raises.
        self.session_store = sessions.get_store(request=self.request)
        try:
            RequestHandler.dispatch(self)
        finally:
            self.session_store.save_sessions(self.response)

    def base_url(self, secure=False):
        """Return scheme://host[:port] for the current request.

        When `secure` is True the scheme is forced to https."""
        o = urlparse.urlsplit(self.request.url)
        scheme = 'https' if secure else o.scheme
        # o.netloc already includes the ":port" suffix when one is present;
        # the previous code appended o.port again, producing URLs such as
        # "http://host:8080:8080".
        return "%s://%s" % (scheme, o.netloc)

    def app_url(self, path="/", secure=False):
        """Return an absolute URL for `path` on this application."""
        return "%s%s" % (self.base_url(secure=secure), path)

    def render_template(self, filename, **template_args):
        """Render a Jinja2 template straight into the response body."""
        self.response.write(self.jinja2.render_template(filename, **template_args))

    @webapp2.cached_property
    def jinja2(self):
        # Jinja2 renderer bound to this WSGI app (cached per request handler).
        return jinja2.get_jinja2(app=self.app)

    @webapp2.cached_property
    def session(self):
        # Default session backed by the store created in dispatch().
        return self.session_store.get_session()

    @webapp2.cached_property
    def dwolla(self):
        # Dwolla client configured from the application config.
        apikey = self.app.config['DWOLLA_API_KEY']
        secret = self.app.config['DWOLLA_API_SECRET']
        return dwolla.DwollaClientApp(apikey, secret)
class UserHandler(BaseHandler):
    """Base handler for pages that require a logged-in user.

    Loads the datastore entity referenced by the session before dispatching;
    redirects to /login when the session has no user."""

    def dispatch(self):
        self.session_store = sessions.get_store(request=self.request)
        if not self.session.get('user'):
            # Not logged in: send to the login page and stop processing.
            self.redirect(self.app_url('/login'))
            return
        # Bug fix: the original referenced the undefined bare name `session`
        # (NameError at runtime); the session is reached via `self.session`.
        # NOTE(review): DwollaOauthHandler stores the Dwolla account dict in
        # session['user'] -- verify this really holds a datastore key string.
        self.user = db.get(db.Key(self.session['user']))
        try:
            RequestHandler.dispatch(self)
        finally:
            self.session_store.save_sessions(self.response)
class LoginHandler(BaseHandler):
    """Kicks off the Dwolla OAuth flow by redirecting to the auth URL."""

    def get(self):
        permissions = self.app.config["DWOLLA_API_PERMISSIONS"]
        callback = self.app_url('/oauth_cb')
        self.redirect(self.dwolla.init_oauth_url(callback, permissions))
class DwollaOauthHandler(BaseHandler):
    """OAuth callback: trades the code for a token, stores the account in the
    session and returns to the home page."""

    def get(self):
        redirect_uri = self.app_url('/oauth_cb')
        # Exchange the authorization code for an access token.
        code = self.request.get("code")
        token = self.dwolla.get_oauth_token(code, redirect_uri=redirect_uri)
        # Fetch the account details for the newly authorized user.
        account = dwolla.DwollaUser(token).get_account_info()
        self.session['user'] = account
        self.session['account'] = str(account['Id'])
        self.redirect("/")
class MainHandler(BaseHandler):
    """Shows the payment page for the item referenced by the ?k= datastore
    key, building a Dwolla gateway checkout URL for it."""

    def get(self):
        item_key = self.request.get('k')
        try:
            item = db.get(db.Key(item_key))
        except Exception:
            # Malformed/missing key: offer to create a new item instead.
            # (Was a bare `except:`, which also swallows SystemExit and
            # KeyboardInterrupt.)
            return self.redirect(self.app_url("/new"))
        if item is None:
            # Well-formed key, but the entity no longer exists.
            return self.redirect(self.app_url("/new"))
        apikey = self.app.config['DWOLLA_API_KEY']
        secret = self.app.config['DWOLLA_API_SECRET']
        gateway = dwolla.DwollaGateway(apikey, secret, self.app_url('/gateway'))
        gateway.start_gateway_session()
        gateway.add_gateway_product(item.account, item.amount, desc=item.text, qty=1)
        try:
            url = gateway.get_gateway_URL(item.account, callback=self.app_url('/confirm'))
            self.render_template('show.html', url=url, item=item)
        except Exception as e:
            # Stash the form values so /new can pre-populate, then show the
            # gateway error on the form.
            self.session['account'] = item.account
            self.session['amount'] = item.amount
            self.session['text'] = item.text
            self.redirect('/new?err=' + str(e))
class LogoutHandler(BaseHandler):
    """Clears all login-related session state, then returns home."""

    def get(self):
        for key in ('user', 'account', 'text', 'amount'):
            if key in self.session:
                del self.session[key]
        self.redirect("/")
class NewHandler(BaseHandler):
    """Form for creating a new payment item: GET renders it, POST saves it."""

    def get(self):
        # Pre-populate the form from the session (set by a previous failed
        # POST or by OAuth login); `err` carries an error message to display.
        err = self.request.get('err', None)
        account = self.session.get('account', '000-000-0000')
        self.render_template('new.html', account=account, error=err, session=self.session)

    def post(self):
        # Expando: schema-less datastore entity for the payment item.
        item = db.Expando()
        item.account = self.request.get('dwolla_id')
        item.text = self.request.get('text')
        try:
            # Accept amounts with a leading "$".
            amount = float(self.request.get('amount').replace("$", ""))
            item.amount = amount
            item.put()
            self.redirect('/?k=%s' % item.key())
        except:
            # NOTE(review): bare except -- float() raises ValueError and a
            # missing field raises AttributeError; consider narrowing.
            # NOTE(review): the item is still saved with the *string* amount
            # "1.0" while the session is told "0"; looks unintended - verify.
            amount = "1.0"
            item.amount = amount
            item.put()
            self.session['account'] = item.account
            self.session['amount'] = "0"
            self.session['text'] = item.text
            self.redirect('/new?err=Invalid%20Amount.')
class ConfirmHandler(BaseHandler):
    """Renders the payment-confirmation page."""

    def get(self):
        self.render_template('confirm.html')
class GatewayHandler(BaseHandler):
    """Dwolla gateway return page: shows any error the gateway reported."""

    def get(self):
        error_code = self.request.get('error')
        error_text = self.request.get('error_description')
        self.render_template('gateway.html', error=error_code, text=error_text)
class PaidHandler(BaseHandler):
    """Landing page shown after a completed payment."""

    def get(self):
        # Bug fix: the original called self.render_template(self.request),
        # passing the request object where a template *filename* is expected,
        # which can never render. 'paid.html' follows the naming convention of
        # the other handlers -- TODO(review): confirm the template name.
        self.render_template('paid.html')
# Imported late, after the handler classes, so per-environment configuration
# can reference them if needed; `config.dev` is used below.
import config

# URL routing table: one (path, handler class) pair per page.
app_routes = [
    ('/', MainHandler),
    ('/login', LoginHandler),
    ('/logout', LogoutHandler),
    ('/oauth_cb', DwollaOauthHandler),
    ('/new', NewHandler),
    ('/confirm', ConfirmHandler),
    ('/gateway', GatewayHandler),
]

# WSGI entry point for App Engine.
# NOTE(review): debug=True exposes tracebacks; disable in production.
app = webapp2.WSGIApplication(app_routes, config=config.dev, debug=True)
| {
"content_hash": "faf09fa1b267db7b4ecd4ee06dfd9878",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 90,
"avg_line_length": 31.63372093023256,
"alnum_prop": 0.6118360595478772,
"repo_name": "hansent/dwollaup",
"id": "45626b8ed042f581afd15de28a8ae3e22304ac8b",
"size": "5441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30643"
}
],
"symlink_target": ""
} |
from urllib.request import Request, urlopen
from urllib.parse import quote, urlencode
from urllib.error import HTTPError
import json
_baseurl = "https://api.twitch.tv/kraken"
_apiversionheader = "application/vnd.twitchtv.v2+json"
class TwitchAPI(object):
    """Minimal client for the Twitch "kraken" v2 REST API.

    :param client_id: Twitch application client ID, sent with every request.
    """

    def __init__(self, client_id):
        self.client_id = client_id

    def _perform_request(self, path):
        """GET `path` relative to the API root and return the decoded JSON."""
        req = Request(_baseurl + path, None, {'Accept': _apiversionheader, 'Client-ID': self.client_id})
        # Bug fix: use the standard `.read()` instead of `.readall()`, which
        # is not part of the HTTPResponse API.
        return json.loads(urlopen(req).read().decode('utf-8'))

    @staticmethod
    def _build_qs(limit=25, offset=0, hls=False, embeddable=False, channel=None, game=None, broadcasts=False, q=None, period='week'):
        """Build a query string from the supported parameters.

        Parameters left at their API default are omitted from the result.

        :raises ValueError: for an out-of-range limit/offset or an invalid
            period.
        """
        query = {}
        if game is not None:
            query['game'] = game
        if channel is not None:
            # Twitch channel names are case-insensitive; normalize.
            query['channel'] = channel.lower()
        # Bug fix: the original compared int/str values with `is`/`is not`,
        # which relies on CPython interning and is a SyntaxWarning on modern
        # Python; use ==/!= value comparison throughout.
        if limit != 25:
            if limit > 100 or limit <= 0:
                raise ValueError('limit must be in the range 1-100 (inclusive)')
            query['limit'] = limit
        if offset != 0:
            if offset < 0:
                raise ValueError('offset cannot be negative')
            query['offset'] = offset
        if embeddable:
            query['embeddable'] = 'true'
        if hls:
            query['hls'] = 'true'
        if broadcasts:
            query['broadcasts'] = 'true'
        if q is not None:
            query['q'] = q
        if period != 'week':
            if period not in ('week', 'month', 'all'):
                raise ValueError('Invalid option for period. Valid choices are "week", "month" and "all"')
            # Bug fix: the validated period was previously never added to the
            # query, so callers' period argument was silently dropped.
            query['period'] = period
        return urlencode(query)

    def get_streams(self, game=None, channel=None, limit=25, offset=0, embeddable=False, hls=False):
        query_string = TwitchAPI._build_qs(limit, offset, hls, embeddable, channel, game)
        if query_string:
            return self._perform_request('/streams?' + query_string)
        return self._perform_request('/streams')

    def get_stream(self, channel_name):
        return self._perform_request('/streams/' + quote(channel_name, ''))

    def get_featured_streams(self, limit=25, offset=0, hls=False):
        query_string = TwitchAPI._build_qs(limit, offset, hls)
        if query_string:
            return self._perform_request('/streams/featured?' + query_string)
        return self._perform_request('/streams/featured')

    def get_streams_summary(self, limit=25, offset=0, hls=False):
        query_string = TwitchAPI._build_qs(limit, offset, hls)
        if query_string:
            return self._perform_request('/streams/summary?' + query_string)
        return self._perform_request('/streams/summary')

    def get_channel(self, channel_name):
        # NOTE(review): other endpoints use '/channels/<name>'; '/channel' may
        # be the authenticated-user endpoint -- verify against the API docs.
        return self._perform_request('/channel/' + quote(channel_name, ''))

    def get_channel_videos(self, channel_name, limit=10, offset=0, broadcasts=False):
        query_string = TwitchAPI._build_qs(limit, offset, broadcasts=broadcasts)
        if query_string:
            return self._perform_request('/channels/' + quote(channel_name, '') + '/videos?' + query_string)
        return self._perform_request('/channels/' + quote(channel_name, '') + '/videos')

    def get_channel_followers(self, channel_name, limit=25, offset=0):
        query_string = TwitchAPI._build_qs(limit, offset)
        if query_string:
            return self._perform_request('/channels/' + quote(channel_name, '') + '/follows?' + query_string)
        return self._perform_request('/channels/' + quote(channel_name, '') + '/follows')

    def get_chat_emoticons(self):
        return self._perform_request('/chat/emoticons')

    def get_user_follows(self, username, limit=25, offset=0):
        query_string = TwitchAPI._build_qs(limit, offset)
        if query_string:
            return self._perform_request('/users/' + quote(username, '') + '/follows/channels?' + query_string)
        return self._perform_request('/users/' + quote(username, '') + '/follows/channels')

    def does_user_follow_channel(self, username, channel_name):
        """Return True when `username` follows `channel_name` (HEAD probe)."""
        req = Request(_baseurl + '/users/' + quote(username, '') + '/follows/channels/' + quote(channel_name, None),
                      None,
                      {'Accept': _apiversionheader, 'Client-ID': self.client_id},
                      method='HEAD'
                      )
        try:
            return urlopen(req).status == 200
        except HTTPError:
            # 404 means "does not follow".
            return False

    def get_top_games(self, limit=25, offset=0, hls=False):
        query_string = TwitchAPI._build_qs(limit, offset, hls)
        if query_string:
            return self._perform_request('/games/top?' + query_string)
        return self._perform_request('/games/top')

    def search_streams(self, query, limit=25, offset=0):
        return self._perform_request('/search/streams?' + TwitchAPI._build_qs(limit, offset, q=query))

    def search_games(self, query, limit=25, offset=0):
        return self._perform_request('/search/games?' + TwitchAPI._build_qs(limit, offset, q=query))

    def get_teams(self, limit=25, offset=0):
        query_string = TwitchAPI._build_qs(limit, offset)
        if query_string:
            return self._perform_request('/teams?' + query_string)
        return self._perform_request('/teams')

    def get_team(self, team_name):
        return self._perform_request('/teams/' + quote(team_name, ''))

    def get_user(self, username):
        return self._perform_request('/users/' + quote(username, ''))

    def get_video(self, video_id):
        return self._perform_request('/videos/' + quote(video_id, ''))

    def get_top_videos(self, limit=10, offset=0, game='', period="week"):
        query_string = TwitchAPI._build_qs(limit, offset, game=game, period=period)
        if query_string:
            return self._perform_request('/videos/top?' + query_string)
        return self._perform_request('/videos/top')
# NOTE(review): hard-coded client ID committed to source control; load it
# from configuration or an environment variable instead.
api = TwitchAPI("dg5z0vaar0gesf8r3ah8ifgh1r7yekb")
| {
"content_hash": "ba9350adfe2880ee8431ef23a6a82a93",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 133,
"avg_line_length": 44.175182481751825,
"alnum_prop": 0.6112029081295439,
"repo_name": "nicka101/CloudBroWeb",
"id": "9b964bdd650b15c392394e8a51e2a6095bc8cf97",
"size": "6052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitch/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "262876"
},
{
"name": "HTML",
"bytes": "13493"
},
{
"name": "JavaScript",
"bytes": "7244"
},
{
"name": "Python",
"bytes": "16023"
}
],
"symlink_target": ""
} |
"""Implementation of compile_html based on CreoleWiki."""
import codecs
import os
try:
from creole import Parser
from creole.html_emitter import HtmlEmitter
creole = True
except ImportError:
creole = None
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing
class CompileWiki(PageCompiler):
    """Compile CreoleWiki markup into HTML."""

    name = "wiki"
    demote_headers = True
    supports_onefile = False

    def compile(self, source, dest, is_two_file=True, post=None, lang=None):
        """Compile the source file into HTML and save as dest."""
        if creole is None:
            # Report the missing optional dependency to the user.
            req_missing(['creole'], 'build this site (compile CreoleWiki)')
        makedirs(os.path.dirname(dest))
        with codecs.open(dest, "w+", "utf8") as out_file:
            with codecs.open(source, "r", "utf8") as in_file:
                data = in_file.read()
                document = Parser(data).parse()
                output = HtmlEmitter(document).emit()
                out_file.write(output)

    def compile_html(self, source, dest, is_two_file=True):
        """Compile the post into HTML (deprecated API)."""
        try:
            post = self.site.post_per_input_file[source]
        except KeyError:
            post = None
        # Bug fix: this previously called the *builtin* compile() instead of
        # this compiler's compile() method.
        return self.compile(source, dest, is_two_file, post, None)

    def create_post(self, path, **kw):
        """Create an empty wiki post file at `path`.

        :raises Exception: if the (unsupported) one-file format is requested.
        """
        content = kw.pop('content', 'Write your post here.')
        onefile = kw.pop('onefile', False)
        # is_page is not used by create_post as of now.
        kw.pop('is_page', False)
        if onefile:
            raise Exception('The one-file format is not supported by this compiler.')
        # Make sure the file ends with a newline.
        if not content.endswith('\n'):
            content += '\n'
        with codecs.open(path, "wb+", "utf8") as fd:
            fd.write(content)
| {
"content_hash": "8600e541b0419807f7d09a7cea17ec7e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 85,
"avg_line_length": 33.43636363636364,
"alnum_prop": 0.6084828711256117,
"repo_name": "getnikola/plugins",
"id": "2284f6d1f14e2542f8547f2aab079d28bfd07a7f",
"size": "2981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v7/wiki/wiki.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8729"
},
{
"name": "Emacs Lisp",
"bytes": "8804"
},
{
"name": "HTML",
"bytes": "2470"
},
{
"name": "JavaScript",
"bytes": "41087"
},
{
"name": "Python",
"bytes": "1157045"
},
{
"name": "TeX",
"bytes": "844"
}
],
"symlink_target": ""
} |
import subprocess
import sys
import setup_util
from os.path import expanduser
import os
import getpass
def start(args, logfile, errfile):
  """Start the web-simple benchmark: patch its configs for this host, launch
  Starman on a unix socket, then put nginx in front of it.

  Returns 0 on success, 1 when nginx could not be started."""
  # Point the Perl app at the benchmark database host and rewrite the nginx
  # config for the current user and the framework root directory.
  setup_util.replace_text("web-simple/app.pl", "localhost", args.database_host)
  setup_util.replace_text("web-simple/nginx.conf", "USR", getpass.getuser())
  setup_util.replace_text("web-simple/nginx.conf", "server unix:.*\/FrameworkBenchmarks/web-simple", "server unix:" + args.troot)

  try:
    # Starman serves the app on a unix socket; nginx proxies to it.
    subprocess.Popen("plackup -E production -s Starman --workers=" + str(args.max_threads) + " -l $TROOT/frameworks-benchmark.sock -a $TROOT/app.pl", shell=True, cwd="web-simple", stderr=errfile, stdout=logfile)
    subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c $TROOT/nginx.conf", shell=True, stderr=errfile, stdout=logfile)
    return 0
  except subprocess.CalledProcessError:
    return 1
def stop(logfile, errfile):
  """Stop nginx, then SIGTERM any Starman processes still running.

  Returns 0 on success, 1 on failure."""
  try:
    subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True, stderr=errfile, stdout=logfile)

    # Find leftover Starman workers by scanning `ps aux` output.
    # NOTE(review): under Python 3, `out` would be bytes and
    # `'starman' in line` would raise TypeError; this script targets Python 2.
    p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
    out, err = p.communicate()
    for line in out.splitlines():
      if 'starman' in line:
        # Second whitespace-separated column of `ps aux` is the PID.
        pid = int(line.split(None, 2)[1])
        os.kill(pid, 15)  # 15 == SIGTERM
    return 0
  except subprocess.CalledProcessError:
    return 1
| {
"content_hash": "f0647b6167b73f2ee7d1080f7dd70fee",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 211,
"avg_line_length": 42.4,
"alnum_prop": 0.7075471698113207,
"repo_name": "ratpack/FrameworkBenchmarks",
"id": "7381de8a198c86c3f49970abefc997ff357491b6",
"size": "1272",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "frameworks/Perl/web-simple/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "838"
},
{
"name": "C",
"bytes": "39732"
},
{
"name": "C#",
"bytes": "128703"
},
{
"name": "C++",
"bytes": "402630"
},
{
"name": "CSS",
"bytes": "234858"
},
{
"name": "Clojure",
"bytes": "18787"
},
{
"name": "Dart",
"bytes": "35750"
},
{
"name": "Elixir",
"bytes": "1912"
},
{
"name": "Erlang",
"bytes": "7670"
},
{
"name": "Go",
"bytes": "35314"
},
{
"name": "Groovy",
"bytes": "15587"
},
{
"name": "Haskell",
"bytes": "8771"
},
{
"name": "Java",
"bytes": "264212"
},
{
"name": "JavaScript",
"bytes": "395155"
},
{
"name": "Lua",
"bytes": "7463"
},
{
"name": "MoonScript",
"bytes": "2204"
},
{
"name": "Nim",
"bytes": "32032"
},
{
"name": "PHP",
"bytes": "17587921"
},
{
"name": "Perl",
"bytes": "18774"
},
{
"name": "PowerShell",
"bytes": "35514"
},
{
"name": "Prolog",
"bytes": "317"
},
{
"name": "Python",
"bytes": "413446"
},
{
"name": "Racket",
"bytes": "5298"
},
{
"name": "Ruby",
"bytes": "73849"
},
{
"name": "Scala",
"bytes": "62267"
},
{
"name": "Shell",
"bytes": "114520"
},
{
"name": "Volt",
"bytes": "677"
}
],
"symlink_target": ""
} |
# store the string written to screens so can check it from test
current_drawn_strings = []
# Mirrors the running state of the (single) mock timer; toggled by
# Timer.start()/Timer.stop().
timer_running = False

# Subset of JavaScript key codes used by the code under test.
KEY_MAP = { "down" : 40, "up": 38, "S" : 83, "W" : 87, "H" : 72}
class Label:
    """Mock of simplegui's label control."""

    def set_text(l, text):
        # The mock discards the text and simply hands back a fresh label,
        # mimicking the real API's return value.
        return Label()
class Frame:
    """Mock of simplegui's frame: accepts all calls, renders nothing."""

    def add_button(f, label, handler, width=100):
        """No-op stand-in for adding a button control."""

    def add_input(f, label, handler, width):
        """No-op stand-in for adding a text-input control."""

    def start(f):
        """No-op: a real frame would start the event loop here."""

    def set_draw_handler(f, function):
        """No-op: the draw callback is ignored."""

    def set_keydown_handler(f, key):
        """No-op: the keydown callback is ignored."""

    def set_keyup_handler(f, key):
        """No-op: the keyup callback is ignored."""

    def add_label(f, label):
        """Return a fresh mock Label, as the real API does."""
        return Label()

    def set_mouseclick_handler(f, handler):
        """No-op: the mouse-click callback is ignored."""
class Timer:
    """Mock timer: only flips the module-level `timer_running` flag."""

    def start(t):
        """Mark the mock timer as running."""
        global timer_running
        timer_running = True

    def stop(t):
        """Mark the mock timer as stopped."""
        global timer_running
        timer_running = False
class Canvas:
    """Mock canvas: records drawn strings so tests can inspect them."""

    def draw_text(c, str, coords, font, color):
        # append() mutates the module-level list in place, so no `global`
        # declaration is needed. (The original declared the misspelled name
        # `current_drawn_string` -- a dead, no-effect statement -- removed.)
        current_drawn_strings.append(str)

    def draw_polygon(c, point_list, line_width, line_color, fill_color):
        """No-op stand-in for polygon drawing."""
def create_frame(title, canvas_width, canvas_height, control_width=200):
    """Return a mock Frame; the requested title and geometry are ignored."""
    return Frame()
def create_timer(time, function):
    """Return a mock Timer; the interval and callback are ignored."""
    return Timer()
| {
"content_hash": "0c30c8df2635dee811eb107d93b7efc9",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 72,
"avg_line_length": 23.274509803921568,
"alnum_prop": 0.6267902274641954,
"repo_name": "hemmerling/python-coursera2012",
"id": "4f8ddf199bf8097da9c2eb137b2b6b965a6eef29",
"size": "1372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/week5/simplegui.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "316418"
}
],
"symlink_target": ""
} |
import contextlib
import glob
import hashlib
import logging
import os
import re
import shutil
import signal
import socket
import subprocess
import tempfile
import threading
import time
import zipfile
from urllib.error import URLError
from urllib.request import urlopen
import grpc
from apache_beam.version import __version__ as beam_version
_LOGGER = logging.getLogger(__name__)
class SubprocessServer(object):
  """An abstract base class for running GRPC Servers as an external process.

  This class acts as a context which will start up a server, provides a stub
  to connect to it, and then shuts the server down. For example::

      with SubprocessServer(GrpcStubClass, [executable, arg, ...]) as stub:
          stub.CallService(...)
  """
  def __init__(self, stub_class, cmd, port=None):
    """Creates the server object.

    :param stub_class: the auto-generated GRPC client stub class used for
        connecting to the GRPC service
    :param cmd: command (including arguments) for starting up the server,
        suitable for passing to `subprocess.POpen`.
    :param port: (optional) the port at which the subprocess will serve its
        service. If not given, one will be randomly chosen and the special
        string "{{PORT}}" will be substituted in the command line arguments
        with the chosen port.
    """
    # Guards _process: start/stop may be called from multiple threads.
    self._process_lock = threading.RLock()
    self._process = None
    self._stub_class = stub_class
    self._cmd = [str(arg) for arg in cmd]
    self._port = port
  def __enter__(self):
    # Context-manager entry yields a connected stub, not the server itself.
    return self.start()
  def __exit__(self, *unused_args):
    self.stop()
  def start(self):
    """Launch the subprocess and return a stub connected to it.

    Blocks until the gRPC channel is ready; stops the process and re-raises
    on any failure.
    """
    try:
      endpoint = self.start_process()
      wait_secs = .1
      # Unlimited message sizes in both directions.
      channel_options = [("grpc.max_receive_message_length", -1),
                         ("grpc.max_send_message_length", -1)]
      channel = grpc.insecure_channel(endpoint, options=channel_options)
      channel_ready = grpc.channel_ready_future(channel)
      while True:
        # Bail out early when the subprocess already died; poll() returning
        # a value means it exited.
        if self._process is not None and self._process.poll() is not None:
          _LOGGER.error("Starting job service with %s", self._process.args)
          raise RuntimeError(
              'Service failed to start up with error %s' % self._process.poll())
        try:
          channel_ready.result(timeout=wait_secs)
          break
        except (grpc.FutureTimeoutError, grpc.RpcError):
          # Back off gradually; escalate log level once waits exceed 1s.
          wait_secs *= 1.2
          logging.log(
              logging.WARNING if wait_secs > 1 else logging.DEBUG,
              'Waiting for grpc channel to be ready at %s.',
              endpoint)
      return self._stub_class(channel)
    except:  # pylint: disable=bare-except
      _LOGGER.exception("Error bringing up service")
      self.stop()
      raise
  def start_process(self):
    """Start the subprocess (replacing any prior one) and return 'host:port'."""
    with self._process_lock:
      if self._process:
        self.stop()
      if self._port:
        port = self._port
        cmd = self._cmd
      else:
        # No fixed port: pick a free one and substitute the placeholder.
        port, = pick_port(None)
        cmd = [arg.replace('{{PORT}}', str(port)) for arg in self._cmd]
      endpoint = 'localhost:%s' % port
      _LOGGER.info("Starting service with %s", str(cmd).replace("',", "'"))
      self._process = subprocess.Popen(
          cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
      # Emit the output of this command as info level logging.
      def log_stdout():
        line = self._process.stdout.readline()
        while line:
          # The log obtained from stdout is bytes, decode it into string.
          # Remove newline via rstrip() to not print an empty line.
          _LOGGER.info(line.decode(errors='backslashreplace').rstrip())
          line = self._process.stdout.readline()
      # Daemon thread so a wedged subprocess never blocks interpreter exit.
      t = threading.Thread(target=log_stdout)
      t.daemon = True
      t.start()
      return endpoint
  def stop(self):
    self.stop_process()
  def stop_process(self):
    """Terminate the subprocess: up to 5 SIGINTs, then SIGKILL."""
    with self._process_lock:
      if not self._process:
        return
      for _ in range(5):
        if self._process.poll() is not None:
          break
        logging.debug("Sending SIGINT to job_server")
        # SIGINT first, to give the server a chance to shut down cleanly.
        self._process.send_signal(signal.SIGINT)
        time.sleep(1)
      if self._process.poll() is None:
        self._process.kill()
      self._process = None
  def local_temp_dir(self, **kwargs):
    # NOTE(review): `self._local_temp_root` is never assigned in this class;
    # unless a subclass sets it, this raises AttributeError -- verify.
    return tempfile.mkdtemp(dir=self._local_temp_root, **kwargs)
class JavaJarServer(SubprocessServer):
MAVEN_CENTRAL_REPOSITORY = 'https://repo.maven.apache.org/maven2'
BEAM_GROUP_ID = 'org.apache.beam'
JAR_CACHE = os.path.expanduser("~/.apache_beam/cache/jars")
_BEAM_SERVICES = type(
'local', (threading.local, ),
dict(__init__=lambda self: setattr(self, 'replacements', {})))()
def __init__(self, stub_class, path_to_jar, java_arguments, classpath=None):
if classpath:
# java -jar ignores the classpath, so we make a new jar that embeds
# the requested classpath.
path_to_jar = self.make_classpath_jar(path_to_jar, classpath)
super().__init__(
stub_class, ['java', '-jar', path_to_jar] + list(java_arguments))
self._existing_service = path_to_jar if _is_service_endpoint(
path_to_jar) else None
def start_process(self):
if self._existing_service:
return self._existing_service
else:
if not shutil.which('java'):
raise RuntimeError(
'Java must be installed on this system to use this '
'transform/runner.')
return super().start_process()
def stop_process(self):
if self._existing_service:
pass
else:
return super().stop_process()
@classmethod
def jar_name(cls, artifact_id, version, classifier=None, appendix=None):
return '-'.join(
filter(None, [artifact_id, appendix, version, classifier])) + '.jar'
@classmethod
def path_to_maven_jar(
cls,
artifact_id,
group_id,
version,
repository=MAVEN_CENTRAL_REPOSITORY,
classifier=None,
appendix=None):
return '/'.join([
repository,
group_id.replace('.', '/'),
artifact_id,
version,
cls.jar_name(artifact_id, version, classifier, appendix)
])
@classmethod
def path_to_beam_jar(
cls,
gradle_target,
appendix=None,
version=beam_version,
artifact_id=None):
if gradle_target in cls._BEAM_SERVICES.replacements:
return cls._BEAM_SERVICES.replacements[gradle_target]
gradle_package = gradle_target.strip(':').rsplit(':', 1)[0]
if not artifact_id:
artifact_id = 'beam-' + gradle_package.replace(':', '-')
project_root = os.path.sep.join(
os.path.abspath(__file__).split(os.path.sep)[:-5])
local_path = os.path.join(
project_root,
gradle_package.replace(':', os.path.sep),
'build',
'libs',
cls.jar_name(
artifact_id,
version.replace('.dev', ''),
classifier='SNAPSHOT',
appendix=appendix))
if os.path.exists(local_path):
_LOGGER.info('Using pre-built snapshot at %s', local_path)
return local_path
elif '.dev' in version:
# TODO: Attempt to use nightly snapshots?
raise RuntimeError(
(
'%s not found. '
'Please build the server with \n cd %s; ./gradlew %s') %
(local_path, os.path.abspath(project_root), gradle_target))
else:
return cls.path_to_maven_jar(
artifact_id,
cls.BEAM_GROUP_ID,
version,
cls.MAVEN_CENTRAL_REPOSITORY,
appendix=appendix)
@classmethod
def local_jar(cls, url, cache_dir=None):
if cache_dir is None:
cache_dir = cls.JAR_CACHE
# TODO: Verify checksum?
if _is_service_endpoint(url):
return url
elif os.path.exists(url):
return url
else:
cached_jar = os.path.join(cache_dir, os.path.basename(url))
if os.path.exists(cached_jar):
_LOGGER.info('Using cached job server jar from %s' % url)
else:
_LOGGER.info('Downloading job server jar from %s' % url)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# TODO: Clean up this cache according to some policy.
try:
url_read = urlopen(url)
with open(cached_jar + '.tmp', 'wb') as jar_write:
shutil.copyfileobj(url_read, jar_write, length=1 << 20)
os.rename(cached_jar + '.tmp', cached_jar)
except URLError as e:
raise RuntimeError(
'Unable to fetch remote job server jar at %s: %s' % (url, e))
return cached_jar
@classmethod
@contextlib.contextmanager
def beam_services(cls, replacements):
try:
old = cls._BEAM_SERVICES.replacements
cls._BEAM_SERVICES.replacements = dict(old, **replacements)
yield
finally:
cls._BEAM_SERVICES.replacements = old
  @classmethod
  def make_classpath_jar(cls, main_jar, extra_jars, cache_dir=None):
    """Creates a small composite jar whose manifest references main_jar and
    extra_jars via Class-Path and reuses main_jar's Main-Class.

    Args:
      main_jar: path to the jar providing the Main-Class entry.
      extra_jars: iterable of jar paths or glob patterns to include.
      cache_dir: where to build the composite jar; defaults to cls.JAR_CACHE.

    Returns:
      Path to the composite jar, keyed (and cached) by the hash of the
      sorted classpath entries.
    """
    if cache_dir is None:
      cache_dir = cls.JAR_CACHE
    composite_jar_dir = os.path.join(cache_dir, 'composite-jars')
    os.makedirs(composite_jar_dir, exist_ok=True)
    classpath = []
    # Class-Path references from a jar must be relative, so we create
    # a relatively-addressable subdirectory with symlinks to all the
    # required jars.
    for pattern in [main_jar] + list(extra_jars):
      # glob.glob returns [] for a literal non-glob path; fall back to the
      # pattern itself in that case.
      for path in glob.glob(pattern) or [pattern]:
        path = os.path.abspath(path)
        # Name each symlink after the hash of its absolute path so distinct
        # jars with the same basename never collide.
        rel_path = hashlib.sha256(
            path.encode('utf-8')).hexdigest() + os.path.splitext(path)[1]
        classpath.append(rel_path)
        # lexists: don't re-create a symlink that is already present.
        if not os.path.lexists(os.path.join(composite_jar_dir, rel_path)):
          os.symlink(path, os.path.join(composite_jar_dir, rel_path))
    # Now create a single jar that simply references the rest and has the same
    # main class as main_jar.
    composite_jar = os.path.join(
        composite_jar_dir,
        hashlib.sha256(' '.join(sorted(classpath)).encode('ascii')).hexdigest()
        + '.jar')
    if not os.path.exists(composite_jar):
      # Copy the Main-Class line straight out of main_jar's manifest.
      with zipfile.ZipFile(main_jar) as main:
        with main.open('META-INF/MANIFEST.MF') as manifest:
          main_class = next(
              filter(lambda line: line.startswith(b'Main-Class: '), manifest))
      with zipfile.ZipFile(composite_jar + '.tmp', 'w') as composite:
        with composite.open('META-INF/MANIFEST.MF', 'w') as manifest:
          manifest.write(b'Manifest-Version: 1.0\n')
          manifest.write(main_class)
          # Manifest continuation lines begin with a single space.
          manifest.write(
              b'Class-Path: ' + '\n '.join(classpath).encode('ascii') + b'\n')
      # Write to .tmp then rename so a partially-written jar is never
      # visible under the final name.
      os.rename(composite_jar + '.tmp', composite_jar)
    return composite_jar
def _is_service_endpoint(path):
return re.match(r'^[a-zA-Z0-9.-]+:\d+$', path)
def pick_port(*ports):
  """
  Returns a list of ports, same length as input ports list, but replaces
  all None or 0 ports with a random free port.
  """
  import errno  # local import: only needed on the rare AF_INET failure path
  sockets = []

  def find_free_port(port):
    # Non-zero, non-None ports are passed through unchanged.
    if port:
      return port
    try:
      s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except OSError as e:
      # AF_INET may be unavailable in an IPv6-only environment (BEAM-10618);
      # retry with AF_INET6. errno.EAFNOSUPPORT replaces the previous
      # hard-coded 97, which is Linux-specific.
      if e.errno == errno.EAFNOSUPPORT:
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
      else:
        raise  # re-raise with the original traceback
    sockets.append(s)
    # Binding to port 0 lets the OS pick a free port.
    s.bind(('localhost', 0))
    return s.getsockname()[1]

  ports = list(map(find_free_port, ports))
  # Close sockets only now to avoid the same port to be chosen twice
  for s in sockets:
    s.close()
  return ports
| {
"content_hash": "b44080b977b0485962e3b02302222a20",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 80,
"avg_line_length": 33.3132183908046,
"alnum_prop": 0.6195117743465884,
"repo_name": "lukecwik/incubator-beam",
"id": "f11132ca1643d6550dd028422a3176c5348ec3c4",
"size": "12399",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/utils/subprocess_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "70760"
},
{
"name": "Dart",
"bytes": "830463"
},
{
"name": "Dockerfile",
"bytes": "54446"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "5375910"
},
{
"name": "Groovy",
"bytes": "923345"
},
{
"name": "HCL",
"bytes": "101921"
},
{
"name": "HTML",
"bytes": "182819"
},
{
"name": "Java",
"bytes": "40844089"
},
{
"name": "JavaScript",
"bytes": "120093"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "215314"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "10573729"
},
{
"name": "SCSS",
"bytes": "318158"
},
{
"name": "Sass",
"bytes": "25936"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "362995"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "1955893"
}
],
"symlink_target": ""
} |
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db
# Load the configuration object named by the APP_SETTINGS environment
# variable; raises KeyError if the variable is not set.
app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
# Expose Flask-Migrate's migration commands as `python manage.py db ...`.
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
| {
"content_hash": "1453970048a8dbf26fd7cd52bd3e493a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 19.4375,
"alnum_prop": 0.7234726688102894,
"repo_name": "JonathanFrederick/job-hunt",
"id": "1d11db3c754313470aaf7f4ede3f3a73f54096d5",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "634"
},
{
"name": "HTML",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "2871"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "16938"
}
],
"symlink_target": ""
} |
import sdl2
import sdl2.ext
__all__ = []
# Module-global surface that drawing functions render onto.
DRAW_CONTEXT = None


class DrawContext:
    """Context manager that swaps the module-global draw target.

    Entering remembers the current global surface and installs this
    instance's surface; exiting restores the previous one.
    """

    def __init__(self, surface):
        self.old_surface = None
        self.surface = surface

    def __enter__(self):
        global DRAW_CONTEXT
        self.old_surface, DRAW_CONTEXT = DRAW_CONTEXT, self.surface

    def __exit__(self, *args):
        global DRAW_CONTEXT
        DRAW_CONTEXT, self.old_surface = self.old_surface, None
def context(surface=None):
    """Return a DrawContext for *surface*, defaulting to the current target."""
    target = DRAW_CONTEXT if surface is None else surface
    return DrawContext(target)
def surface(size, alpha=False):
    """Create a 32-bit SDL surface of the given (width, height) size.

    With ``alpha`` set, explicit RGBA channel masks are supplied so the
    surface carries an alpha channel; otherwise SDL picks default masks.
    """
    if alpha:
        channel_masks = (0xff000000, 0x00ff0000, 0x0000ff00, 0x000000ff)
    else:
        channel_masks = (0, 0, 0, 0)
    return sdl2.SDL_CreateRGBSurface(0, size[0], size[1], 32, *channel_masks)
def rectangle(color, size, position=(0, 0), alpha=False):
    """Fill a rectangle on the current global draw context.

    A ``size`` of None fills the whole surface; otherwise the rectangle at
    ``position`` with the given ``size`` is filled.
    """
    rgb = sdl2.ext.convert_to_color(color)
    target = DRAW_CONTEXT
    pixel_format = target.contents.format.contents
    if alpha:
        pixel = sdl2.SDL_MapRGBA(pixel_format, rgb.r, rgb.g, rgb.b, rgb.a)
    else:
        pixel = sdl2.SDL_MapRGB(pixel_format, rgb.r, rgb.g, rgb.b)
    if size is None:
        sdl2.SDL_FillRect(target, None, pixel)
    else:
        sdl2.SDL_FillRect(target, sdl2.SDL_Rect(*position, *size), pixel)
| {
"content_hash": "6c62079827832950212e8c2385258d37",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 96,
"avg_line_length": 27.340425531914892,
"alnum_prop": 0.6186770428015564,
"repo_name": "JaniM/PyBasic",
"id": "1542b6f84e7501fefdd7340c8ea1dfeb570355e3",
"size": "1286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pybasic/draw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12626"
}
],
"symlink_target": ""
} |
"""Tests for common methods in strategy classes."""
from absl.testing import parameterized
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute.collective_all_reduce_strategy import CollectiveAllReduceStrategy
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@combinations.generate(
    combinations.combine(
        strategy=[
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_gpu,
        ] + strategy_combinations.all_strategies,
        mode=['eager']))
class StrategyTest(test.TestCase, parameterized.TestCase):
  """Basic sanity checks run across all strategy combinations."""

  def testCaptureReplicaId(self, strategy):
    """Running a replica-id-reading fn from a nested tf.function must not fail."""
    m = {}

    @def_function.function
    def f():
      return ds_context.get_replica_context().replica_id_in_sync_group

    @def_function.function
    def g():
      # Make g() a stateful function so it's traced twice.
      if m.get('v', None) is None:
        m['v'] = variables.Variable(0.)
      return strategy.run(f)

    # No assertion: the test passes if tracing and running raise nothing.
    g()
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
            strategy_combinations.tpu_strategy
        ],
        mode=['graph', 'eager']))
class StrategyLocalResultTest(test.TestCase):
  """Checks experimental_local_results over differently-structured outputs.

  Each test runs a model_fn returning a different structure (dict, list,
  tuple, nested struct, scalar, tensor) and asserts the local results are a
  tuple with one entry per local replica (two, per the expected values).
  """

  def testLocalResultForDictionary(self, distribution):
    @def_function.function
    def model_fn():
      return {'a': constant_op.constant(1.), 'b': constant_op.constant(2.)}

    with distribution.scope():
      result = distribution.run(model_fn)
      got = self.evaluate(distribution.experimental_local_results(result))
      self.assertEqual(got, ({'a': 1., 'b': 2.}, {'a': 1., 'b': 2.}))

  def testLocalResultForList(self, distribution):
    @def_function.function
    def model_fn():
      return [constant_op.constant(1.), constant_op.constant(2.)]

    with distribution.scope():
      result = distribution.run(model_fn)
      got = self.evaluate(distribution.experimental_local_results(result))
      self.assertEqual(got, ([1., 2.], [1., 2.]))

  def testLocalResultForTuple(self, distribution):
    @def_function.function
    def model_fn():
      return (constant_op.constant(1.), constant_op.constant(2.),
              constant_op.constant(3.))

    with distribution.scope():
      result = distribution.run(model_fn)
      got = self.evaluate(distribution.experimental_local_results(result))
      self.assertEqual(got, ((1., 2., 3.), (1., 2., 3.)))

  def testLocalResultForNestedStruct(self, distribution):
    @def_function.function
    def model_fn():
      return ({
          'a': constant_op.constant(1.),
          'b': constant_op.constant(2.)
      }, {
          'a': constant_op.constant(4.),
          'b': constant_op.constant(6.)
      })

    with distribution.scope():
      result = distribution.run(model_fn)
      got = self.evaluate(distribution.experimental_local_results(result))
      # Outer tuple is per-replica; the nested (dict, dict) structure is
      # preserved inside each entry.
      self.assertEqual(got, (({
          'a': 1.,
          'b': 2.
      }, {
          'a': 4.,
          'b': 6.
      }), ({
          'a': 1.,
          'b': 2.
      }, {
          'a': 4.,
          'b': 6.
      })))

  def testLocalResultForNestedStructWithoutTensor(self, distribution):
    @def_function.function
    def model_fn():
      # Plain Python floats, no tensors at all.
      return {'a': 1., 'b': 2.}

    with distribution.scope():
      result = distribution.run(model_fn)
      v = self.evaluate(distribution.experimental_local_results(result))
      self.assertIsInstance(v, tuple)
      self.assertAllEqual(v, ({'a': 1., 'b': 2.}, {'a': 1., 'b': 2.}))

  def testLocalResultForScalarValue(self, distribution):
    @def_function.function
    def model_fn():
      return distribution.extended._get_local_replica_id(
          ds_context.get_replica_context().replica_id_in_sync_group)

    with distribution.scope():
      result = distribution.run(model_fn)
      v = self.evaluate(distribution.experimental_local_results(result))
      self.assertIsInstance(v, tuple)
      # One local replica id per replica: (0, 1).
      self.assertEqual(v, (0, 1))

  def testLocalResultForDictionaryDifferentReplicas(self, distribution):
    @def_function.function
    def model_fn():
      replica_id = distribution.extended._get_local_replica_id(
          ds_context.get_replica_context().replica_id_in_sync_group)
      # Values depend on the replica id, so each local result differs.
      return {
          'a': math_ops.cast(replica_id + 1, dtype=float),
          'b': math_ops.cast(replica_id + 2, dtype=float)
      }

    with distribution.scope():
      result = distribution.run(model_fn)
      got = self.evaluate(distribution.experimental_local_results(result))
      self.assertAllEqual(got, ({'a': 1., 'b': 2.}, {'a': 2., 'b': 3.}))

  def testLocalResultForTensor(self, distribution):
    @def_function.function
    def model_fn():
      return constant_op.constant([2., 3.])

    with distribution.scope():
      result = distribution.run(model_fn)
      v = self.evaluate(distribution.experimental_local_results(result))
      self.assertAllEqual(v, ([2., 3.], [2., 3.]))
@combinations.generate(
    combinations.combine(
        strategy=[
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_gpu,
        ] + strategy_combinations.all_strategies,
        mode=['eager']))
class ReduceTest(test.TestCase, parameterized.TestCase):
  """Tests Strategy.reduce for cross-replica aggregation."""

  def testBasic(self, strategy):
    """SUM-reduce of a per-replica 1.0, both eagerly and via tf.function."""
    per_replica_value = strategy.experimental_distribute_values_from_function(
        lambda _: array_ops.ones((), dtypes.float32))

    def fn_eager():
      return strategy.reduce(
          reduce_util.ReduceOp.SUM, value=per_replica_value, axis=None)

    fn_graph = def_function.function(fn_eager)
    # Run reduce under the strategy scope to explicitly enter
    # strategy default_device scope.
    with strategy.scope():
      self.assertEqual(fn_eager().numpy(), 1.0 * strategy.num_replicas_in_sync)
      self.assertEqual(fn_graph().numpy(), 1.0 * strategy.num_replicas_in_sync)
    # Run reduce without a strategy scope to implicitly enter
    # strategy default_device scope.
    self.assertEqual(fn_eager().numpy(), 1.0 * strategy.num_replicas_in_sync)
    self.assertEqual(fn_graph().numpy(), 1.0 * strategy.num_replicas_in_sync)

  def testAxis(self, strategy):
    """axis=0 additionally reduces within each replica's tensor."""
    @def_function.function
    def fn():
      return constant_op.constant([1., 2.])

    x = strategy.run(fn)
    # Mean is taken over the tensor axis and across replicas: (1+2)/2 = 1.5.
    x_m = strategy.reduce(reduce_util.ReduceOp.MEAN, x, axis=0)
    self.assertEqual(1.5, x_m)
    # Sum: (1+2) per replica, added across all replicas.
    x_s = strategy.reduce(reduce_util.ReduceOp.SUM, x, axis=0)
    self.assertEqual(3 * strategy.num_replicas_in_sync, x_s)
@combinations.generate(
    combinations.combine(
        strategy=[
            strategy_combinations.default_strategy,
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus_no_merge_call,
            strategy_combinations.tpu_strategy,
            strategy_combinations.tpu_strategy_packed_var,
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
            strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
        ],
        update_fn=['assign', 'assign_add', 'assign_sub'],
        tf_function=[True, False],
        mode=['eager']))
class ReplicaCtxUpdateTest(test.TestCase, parameterized.TestCase):
  """Tests ReplicaContext._update applied to a distributed variable."""

  def testDenseUpdate(self, strategy, tf_function, update_fn):
    if strategy_test_lib.is_tpu_strategy(strategy) and (not tf_function):
      self.skipTest('Skip TPUStrategy + eager combination.')
    with strategy.scope():
      distributed_variable1 = variables.Variable(5.0)

    def replica_fn():
      value = array_ops.constant(2.)
      python_literal = 1.
      replica_context = ds_context.get_replica_context()
      fn_sets = {
          'assign': lambda var, value: var.assign(value),
          'assign_add': lambda var, value: var.assign_add(value),
          'assign_sub': lambda var, value: var.assign_sub(value),
      }
      # Apply the update twice: once with a tensor (2.) and once with a
      # plain Python float (1.), covering both argument kinds.
      replica_context._update(
          distributed_variable1, fn_sets[update_fn], args=(value,))
      replica_context._update(
          distributed_variable1, fn_sets[update_fn], args=(python_literal,))

    if tf_function:
      replica_fn = def_function.function(replica_fn)
    strategy.run(replica_fn)
    # Starting from 5.0 and applying (2., 1.):
    # assign     -> last value wins: 1.
    # assign_add -> 5 + 2 + 1 = 8.
    # assign_sub -> 5 - 2 - 1 = 2.
    expected_result = {'assign': 1., 'assign_add': 8., 'assign_sub': 2.}
    self.assertAllEqual(
        strategy.experimental_local_results(distributed_variable1),
        [expected_result[update_fn]] * _get_num_replicas_per_client(strategy))
@combinations.generate(
    combinations.combine(
        strategy=[
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_gpu,
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
            strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
            strategy_combinations.tpu_strategy,
        ] + strategy_combinations.strategies_minus_tpu,
        tf_function=[combinations.tf_function, combinations.no_tf_function],
        mode=['eager']))
class ReplicaCtxAllReduceTest(test.TestCase, parameterized.TestCase):
  """Tests StrategyExtended._replica_ctx_all_reduce for dense tensors,
  IndexedSlices, and nested structures mixing both."""

  def testDense(self, strategy, tf_function):
    if (strategy_test_lib.is_tpu_strategy(strategy) and
        tf_function is combinations.no_tf_function):
      self.skipTest('Skip TPUStrategy + eager combination.')

    @tf_function
    def fn():
      def replica_fn():
        value = array_ops.identity(1.0)
        reduced = strategy.extended._replica_ctx_all_reduce(
            reduce_util.ReduceOp.SUM, value)
        return reduced

      return strategy.experimental_local_results(strategy.run(replica_fn))

    got = fn()[0]
    # Each replica contributes 1.0 to the SUM.
    self.assertEqual(got, 1.0 * strategy.num_replicas_in_sync)

  def testSparse(self, strategy, tf_function):
    if tf_function is combinations.no_tf_function:
      self.skipTest('Skip IndexedSlices + eager combination.')

    @tf_function
    def fn():
      def replica_fn():
        value = indexed_slices.IndexedSlices(
            values=array_ops.identity([[1.0]]),
            indices=array_ops.identity([0]),
            dense_shape=array_ops.identity([5, 1]))
        reduced = strategy.extended._replica_ctx_all_reduce(
            reduce_util.ReduceOp.SUM, value)
        return reduced

      return strategy.experimental_local_results(strategy.run(replica_fn))

    got = fn()[0]
    # All replicas contribute [[1.0]] at index 0, so the sum is scaled by
    # the replica count.
    expect = indexed_slices.IndexedSlices(
        values=array_ops.identity([[1.0 * strategy.num_replicas_in_sync]]),
        indices=array_ops.identity([0]),
        dense_shape=array_ops.identity([5, 1]))
    self.assertAllEqual(
        ops.convert_to_tensor(got), ops.convert_to_tensor(expect))

  def testNestedInput(self, strategy, tf_function):
    if tf_function is combinations.no_tf_function:
      self.skipTest('Skip IndexedSlices + eager combination.')

    @tf_function
    def fn():
      def replica_fn():
        # Tuple mixing dense tensors and IndexedSlices; the structure must
        # be preserved through the all-reduce.
        value = (array_ops.identity(1.0),
                 indexed_slices.IndexedSlices(
                     values=array_ops.identity([[1.0]]),
                     indices=array_ops.identity([0]),
                     dense_shape=array_ops.identity([5, 1])),
                 array_ops.identity(2.0),
                 indexed_slices.IndexedSlices(
                     values=array_ops.identity([[2.0]]),
                     indices=array_ops.identity([1]),
                     dense_shape=array_ops.identity([5, 1])))
        reduced = strategy.extended._replica_ctx_all_reduce(
            reduce_util.ReduceOp.SUM, value)
        return reduced

      return strategy.experimental_local_results(strategy.run(replica_fn))

    got = fn()[0]
    expect = (1.0 * strategy.num_replicas_in_sync,
              indexed_slices.IndexedSlices(
                  values=array_ops.identity(
                      [[1.0 * strategy.num_replicas_in_sync]]),
                  indices=array_ops.identity([0]),
                  dense_shape=array_ops.identity([5, 1])),
              2.0 * strategy.num_replicas_in_sync,
              indexed_slices.IndexedSlices(
                  values=array_ops.identity(
                      [[2.0 * strategy.num_replicas_in_sync]]),
                  indices=array_ops.identity([1]),
                  dense_shape=array_ops.identity([5, 1])))
    self.assertAllClose(
        nest.map_structure(ops.convert_to_tensor, got),
        nest.map_structure(ops.convert_to_tensor, expect))
@combinations.generate(
    combinations.combine(
        strategy=[
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_gpu,
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
            strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
            strategy_combinations.tpu_strategy,
        ] + strategy_combinations.strategies_minus_tpu,
        tf_function=[combinations.tf_function, combinations.no_tf_function],
        mode=['eager']))
class AllReduceTest(test.TestCase, parameterized.TestCase):
  """Tests the public ReplicaContext.all_reduce API for dense tensors,
  IndexedSlices, lists of IndexedSlices, and nested structures."""

  def testDense(self, strategy, tf_function):
    if (strategy_test_lib.is_tpu_strategy(strategy) and
        tf_function is combinations.no_tf_function):
      self.skipTest('Skip TPUStrategy + eager combination.')

    @tf_function
    def fn():
      def replica_fn():
        value = array_ops.identity(1.0)
        rep_ctx = ds_context.get_replica_context()
        reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.SUM, value)
        return reduced

      return strategy.experimental_local_results(strategy.run(replica_fn))

    got = fn()[0]
    # Each replica contributes 1.0 to the SUM.
    self.assertEqual(got, 1.0 * strategy.num_replicas_in_sync)

  def testSparse(self, strategy, tf_function):
    if tf_function is combinations.no_tf_function:
      self.skipTest('Skip IndexedSlices + eager combination.')

    @tf_function
    def fn():
      def replica_fn():
        value = indexed_slices.IndexedSlices(
            values=array_ops.identity([[1.0]]),
            indices=array_ops.identity([0]),
            dense_shape=array_ops.identity([5, 1]))
        rep_ctx = ds_context.get_replica_context()
        reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.MEAN, value)
        return reduced

      return strategy.experimental_local_results(strategy.run(replica_fn))

    got = fn()[0]
    # TPU strategy may densify the result, so only check the container
    # type off-TPU.
    if not strategy_test_lib.is_tpu_strategy(strategy):
      self.assertIsInstance(got, indexed_slices.IndexedSlices)
    # MEAN of identical [[1.0]] contributions stays [[1.0]].
    expect = indexed_slices.IndexedSlices(
        values=array_ops.identity([[1.0]]),
        indices=array_ops.identity([0]),
        dense_shape=array_ops.identity([5, 1]))
    self.assertAllEqual(
        ops.convert_to_tensor(got), ops.convert_to_tensor(expect))

  def testSparseTuple(self, strategy, tf_function):
    if tf_function is combinations.no_tf_function:
      self.skipTest('Skip IndexedSlices + eager combination.')

    @tf_function
    def fn():
      def replica_fn():
        value1 = indexed_slices.IndexedSlices(
            values=array_ops.identity([[1.0]]),
            indices=array_ops.identity([0]),
            dense_shape=array_ops.identity([5, 1]))
        value2 = indexed_slices.IndexedSlices(
            values=array_ops.identity([[2.0]]),
            indices=array_ops.identity([0]),
            dense_shape=array_ops.identity([5, 1]))
        rep_ctx = ds_context.get_replica_context()
        # A list of IndexedSlices is reduced element-wise.
        reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.SUM, [value1, value2])
        return reduced

      return strategy.experimental_local_results(strategy.run(replica_fn))

    got = fn()[0]
    if not strategy_test_lib.is_tpu_strategy(strategy):
      for g in got:
        self.assertIsInstance(g, indexed_slices.IndexedSlices)
    expect = [
        indexed_slices.IndexedSlices(
            values=array_ops.identity([[1.0 * strategy.num_replicas_in_sync]]),
            indices=array_ops.identity([0]),
            dense_shape=array_ops.identity([5, 1])),
        indexed_slices.IndexedSlices(
            values=array_ops.identity([[2.0 * strategy.num_replicas_in_sync]]),
            indices=array_ops.identity([0]),
            dense_shape=array_ops.identity([5, 1]))
    ]
    self.assertAllEqual(
        nest.map_structure(ops.convert_to_tensor, got),
        nest.map_structure(ops.convert_to_tensor, expect))

  def testNestedInput(self, strategy, tf_function):
    if tf_function is combinations.no_tf_function:
      self.skipTest('Skip IndexedSlices + eager combination.')

    @tf_function
    def fn():
      def replica_fn():
        # Tuple mixing dense tensors and IndexedSlices; the structure must
        # be preserved through the all-reduce.
        value = (array_ops.identity(1.0),
                 indexed_slices.IndexedSlices(
                     values=array_ops.identity([[1.0]]),
                     indices=array_ops.identity([0]),
                     dense_shape=array_ops.identity([5, 1])),
                 array_ops.identity(2.0),
                 indexed_slices.IndexedSlices(
                     values=array_ops.identity([[2.0]]),
                     indices=array_ops.identity([1]),
                     dense_shape=array_ops.identity([5, 1])))
        rep_ctx = ds_context.get_replica_context()
        reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.SUM, value)
        return reduced

      return strategy.experimental_local_results(strategy.run(replica_fn))

    got = fn()[0]
    expect = (1.0 * strategy.num_replicas_in_sync,
              indexed_slices.IndexedSlices(
                  values=array_ops.identity(
                      [[1.0 * strategy.num_replicas_in_sync]]),
                  indices=array_ops.identity([0]),
                  dense_shape=array_ops.identity([5, 1])),
              2.0 * strategy.num_replicas_in_sync,
              indexed_slices.IndexedSlices(
                  values=array_ops.identity(
                      [[2.0 * strategy.num_replicas_in_sync]]),
                  indices=array_ops.identity([1]),
                  dense_shape=array_ops.identity([5, 1])))
    self.assertAllClose(
        nest.map_structure(ops.convert_to_tensor, got),
        nest.map_structure(ops.convert_to_tensor, expect))
def _make_indexed_slices(values, indices, dense_shape):
  """Builds an IndexedSlices from plain Python values."""
  return indexed_slices.IndexedSlices(
      values=constant_op.constant(values),
      indices=constant_op.constant(indices),
      dense_shape=constant_op.constant(dense_shape))
def _get_num_replicas_per_client(strategy):
  """Returns the number of replicas co-located with the current client."""
  if not isinstance(strategy, CollectiveAllReduceStrategy):
    return strategy.num_replicas_in_sync
  resolver = strategy.cluster_resolver
  # At least one replica per worker, even with no accelerators attached.
  return max(nest.flatten(resolver.num_accelerators())[0], 1)
@combinations.generate(
    combinations.combine(
        strategy=[
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_gpu,
        ],
        mode=['eager']))
class DistributedCollectiveAllReduceStrategyTest(
    strategy_test_lib.DistributionTestBase,
    parameterized.TestCase):
  """Multi-worker tests (chief + worker) for input distribution and
  cross-worker reductions of host and device tensors."""

  def testDatasetFromFunction(self, strategy):
    def dataset_fn(input_context):
      global_batch_size = 10
      batch_size = input_context.get_per_replica_batch_size(global_batch_size)
      d = dataset_ops.DatasetV2.range(100).repeat().batch(batch_size)
      # Each pipeline reads only its own shard of the data.
      return d.shard(input_context.num_input_pipelines,
                     input_context.input_pipeline_id)

    # Sharded first batches: chief gets 0..4 (sum 10), worker 5..9 (sum 35).
    expected_sum_on_workers = {'chief': 10, 'worker': 35}
    input_iterator = iter(
        strategy.distribute_datasets_from_function(dataset_fn))

    @def_function.function
    def run(iterator):
      return strategy.experimental_local_results(iterator.get_next())

    result = run(input_iterator)
    sum_value = math_ops.reduce_sum(result)
    self.assertEqual(
        sum_value.numpy(),
        expected_sum_on_workers[multi_worker_test_base.get_task_type()])

  def testSimpleInputFromDatasetLastPartialBatch(self, strategy):
    global_batch_size = 8
    # 14 elements with batch 8 leaves a partial final batch of 6.
    dataset = dataset_ops.DatasetV2.range(14).batch(
        global_batch_size, drop_remainder=False)
    input_iterator = iter(strategy.experimental_distribute_dataset(dataset))

    @def_function.function
    def run(input_iterator):
      return strategy.run(lambda x: x, args=(next(input_iterator),))

    # Let the complete batch go.
    run(input_iterator)
    # `result` is an incomplete batch
    result = run(input_iterator)
    expected_data_on_workers = {'chief': [8, 9, 10], 'worker': [11, 12, 13]}
    self.assertAllEqual(
        expected_data_on_workers[multi_worker_test_base.get_task_type()],
        result.numpy(),
    )

  def testSimpleInputFromFnLastPartialBatch(self, strategy):
    def dataset_fn(input_context):
      global_batch_size = 8
      batch_size = input_context.get_per_replica_batch_size(global_batch_size)
      dataset = dataset_ops.DatasetV2.range(14).batch(
          batch_size, drop_remainder=False)
      return dataset.shard(input_context.num_input_pipelines,
                           input_context.input_pipeline_id)

    input_iterator = iter(
        strategy.distribute_datasets_from_function(dataset_fn))

    @def_function.function
    def run(input_iterator):
      return strategy.run(lambda x: x, args=(next(input_iterator),))

    # Let the complete batch go.
    run(input_iterator)
    # `result` is an incomplete batch
    result = run(input_iterator)
    # Sharding differs from experimental_distribute_dataset: the partial
    # remainder splits 4/2 here instead of 3/3 above.
    expected_data_on_worker = {'chief': [8, 9, 10, 11], 'worker': [12, 13]}
    self.assertAllEqual(
        expected_data_on_worker[multi_worker_test_base.get_task_type()],
        result.numpy())

  def testReduceHostTensor(self, strategy):
    # Two workers each contribute 1., so every SUM below yields 2.
    reduced = strategy.reduce(
        reduce_util.ReduceOp.SUM, array_ops.identity(1.), axis=None)
    self.assertEqual(reduced.numpy(), 2.)

  def testReduceToHostTensor(self, strategy):
    value = array_ops.identity(1.)
    reduced = strategy.extended.reduce_to(reduce_util.ReduceOp.SUM, value,
                                          value)
    self.assertEqual(reduced.numpy(), 2.)

  def testBatchReduceToHostTensor(self, strategy):
    value = array_ops.identity(1.)
    reduced = strategy.extended.batch_reduce_to(reduce_util.ReduceOp.SUM,
                                                [(value, value),
                                                 (value, value)])
    self.assertAllEqual([2., 2.], reduced)

  def testReduceDeviceTensors(self, strategy):
    # strategy.run places the value on the replica device first.
    value = strategy.run(lambda: array_ops.identity(1.))
    reduced = strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)
    self.assertEqual(reduced.numpy(), 2.)

  def testReduceToDeviceTensors(self, strategy):
    value = strategy.run(lambda: array_ops.identity(1.))
    reduced = strategy.extended.reduce_to(reduce_util.ReduceOp.SUM, value,
                                          value)
    self.assertEqual(reduced.numpy(), 2.)

  def testBatchReduceToDeviceTensors(self, strategy):
    value = strategy.run(lambda: array_ops.identity(1.))
    reduced = strategy.extended.batch_reduce_to(reduce_util.ReduceOp.SUM,
                                                [(value, value),
                                                 (value, value)])
    self.assertAllEqual([2., 2.], reduced)

  # TODO(crccw): add a test that mixes device and host tensors after multi
  # worker strategy combinations can run on a fixed number of GPUs.
class StrategyClusterResolverTest(test.TestCase, parameterized.TestCase):
  """Tests the Strategy.cluster_resolver property contract."""

  @combinations.generate(
      combinations.combine(
          strategy=[strategy_combinations.multi_worker_mirrored_2x1_cpu] +
          strategy_combinations.all_strategies,
          mode=['eager']))
  def testClusterResolverProperty(self, strategy):
    # CollectiveAllReduceStrategy and TPUStrategy must have a cluster resolver.
    # `None` otherwise.
    resolver = strategy.cluster_resolver
    if not isinstance(strategy, CollectiveAllReduceStrategy) and not isinstance(
        strategy, tpu_strategy.TPUStrategy):
      self.assertIsNone(resolver)
      return

    with strategy.scope():
      # The property must return the same object inside the scope.
      self.assertIs(strategy.cluster_resolver, resolver)

    # The resolver must expose the standard ClusterResolver surface.
    self.assertTrue(hasattr(resolver, 'cluster_spec'))
    self.assertTrue(hasattr(resolver, 'master'))
    self.assertTrue(hasattr(resolver, 'num_accelerators'))
    self.assertTrue(hasattr(resolver, 'task_id'))
    self.assertTrue(hasattr(resolver, 'task_type'))
    if isinstance(strategy, CollectiveAllReduceStrategy):
      self.assertEqual(resolver.task_id, 0)
      self.assertAllInSet(resolver.task_type, ['chief', 'worker'])
if __name__ == '__main__':
  # NOTE(review): delegates to the distribute test harness rather than
  # tf.test.main() -- presumably required for the multi-worker strategy
  # combinations above; confirm in test_util.
  test_util.main()
| {
"content_hash": "7bed9f547c90e9a6f0a93a59f61eb817",
"timestamp": "",
"source": "github",
"line_count": 683,
"max_line_length": 99,
"avg_line_length": 37.194729136163986,
"alnum_prop": 0.6463155408597071,
"repo_name": "Intel-Corporation/tensorflow",
"id": "6c6f88cc11ca1b15ba801963fe85edb66e18b9e3",
"size": "26093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/strategy_common_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
import pylab
import pandas as pd
from easydev import precision
# Separator used in reaction names to denote a logical AND (e.g. "A^B=C").
and_symbol = "^"

# NOTE(review): this is almost certainly a typo for ``__all__`` (the
# trailing underscores are missing), so it currently has no effect on
# ``from ... import *``. Before fixing, confirm that every listed name
# (in particular DTModels and CompareModels) is defined in this module,
# otherwise star imports would start raising AttributeError.
__all = ["Models", "BooleanModels", "ContinousModels",
         "DTModels", "FuzzyModels", "CompareModels"]
class Models(object):
"""Data structure to store models.
Models are stored in dataframes. Columns will hold the reactions.
"""
def __init__(self, data, reacID=None, index_col=None, verbose=True):
""".. rubric:: constructor
:param data: could be a string to read a file (CSV). The CSV header
should contain the reaction name. First column is not expected
to be found. Note, however that :param:`index_col` may be used.
The input can also be a dataframe (column names being the reactions)
set to 0/1. The input can also be an instance of :class:`Models`.
:param list reacID: if provided, columns are renamed using this list
:param index_col:
:param bool verbose:
Reaction names may contain a symbol indicating the logical ANDs. This
should be "^" character.
"""
self.verbose = verbose
# FIXME interpret the first columns automatically ?
if isinstance(data, str):
self.filename = data
self.df = pd.read_csv(self.filename, index_col=index_col)
# FIXIME What is this
if reacID:
reacID = pd.read_csv(reacID)
self.df.columns = reacID.ix[:,0]
if 'Score' in self.df.columns:
self.scores = self.df.Score
del self.df['Score']
if 'score' in self.df.columns:
self.scores = self.df.score
del self.df['score']
elif isinstance(data, pd.DataFrame):
self.df = data.copy()
if 'Score' in self.df.columns:
self.scores = self.df.Score
del self.df['Score']
if 'score' in self.df.columns:
self.scores = self.df.score
del self.df['score']
elif isinstance(data, Models):
self.df = data.df.copy()
else:
from cno import CNOError
raise CNOError("input data not understood. Could be a filename, a dataframe or a Models instance")
if hasattr(data, 'scores'):
self.scores = getattr(data, 'scores')
# TODO: In a reaction from cnograph, they should be not ORs, just simple
# reactions and ANDS (e.g., A^B=C). If "A+B=C" is found, this is coming
# from CellNOptR, ,which has a different conventions. So, we replace
# all + by "^" !! Do we want a warning ?
for reaction in self.df.columns:
count = 0
if "+" in reaction:
# todo: use logging
if self.verbose and count == 0:
sys.stdout.write("Warning in Models. found a + sign... in %s. Interepreted as ^" % reaction)
count = 1
def convert(x):
from cno import Reaction
r = Reaction(x)
r.sort()
name = r.name
name = name.replace("+", "^")
return name
self.df.columns = [convert(x) for x in self.df.columns]
# we also reorder alphabetically the species in the and reactions
# keep this import here to avoid cycling imports
from cno.io.cnograph import CNOGraph
from cno.io import Reaction
self.cnograph = CNOGraph()
non_reactions = []
for this in self.df.columns:
try:
reac = Reaction(str(this))
self.cnograph.add_reaction(str(this))
except:
if self.verbose:
sys.stdout.write('Skipping column %s (not valid reaction ?)' % this)
non_reactions.append(this)
#self.non_reactions = non_reactions
#self.df_non_reactions = self.df[non_reactions].copy()
def drop_scores_above(self, tolerance=None):
max_score = self.scores.min() * (1+tolerance)
index = self.df.ix[self.scores<=max_score].index
self.df = self.df.ix[index]
self.scores = self.scores.ix[index]
    def get_average_model(self, max_score=None):
        """Returns the average model (mean over models, per reaction).

        :param max_score: if provided, only models with a score less than or
            equal to this value are averaged; otherwise all models are used.
        :return: Series of per-reaction averages.
        """
        if max_score is None:
            return self.df.mean(axis=0)
        else:
            # filter: keep only models whose score is below the threshold
            N = float(sum(self.scores<=max_score))
            sys.stdout.write('Keeping %s percent of the models' % str( N /len(self.scores)*100.))
            # NOTE(review): .ix is a deprecated pandas indexer -- this method
            # only works with the older pandas versions this file targets
            return self.df.ix[self.scores<=max_score].mean(axis=0)
def to_csv(self, filename, index=False):
"""Exports the dataframe to a CSV file"""
try:
self.df['score'] = self.scores.values
except:
self.df['score'] = self.scores
self.df.to_csv(filename, index=False)
del self.df['score']
    def to_sif(self, filename=None):
        """Exports to SIF format using the "and" convention.

        The result can be read back with CellNOptR, for instance::

            library(CellNOptR)
            plotModel(readSIF("test.sif"))

        :param filename: target SIF file; behaviour when None is delegated
            to :meth:`cnograph.to_sif` (presumably returns the SIF content
            -- TODO confirm).
        """
        return self.cnograph.to_sif(filename)
def __eq__(self, other):
if len(self.df) != len(other.df):
return False
df1 = self.df.copy()
df2 = other.df.copy()
if all(df1.columns != df2.columns):
return False
# make sure the columns are ordered similarly
df2 = df2[df1.columns]
return all(df1.sort() == df2.sort())
    def __len__(self):
        """Number of models (rows of the dataframe)."""
        return len(self.df)
class ContinousModels(Models):
    """Models with continuous parameter values.

    .. note:: the historical 'Continous' spelling is kept for backward
        compatibility with existing callers.
    """
    def __init__(self, data, reacID=None, index_col=None):
        super(ContinousModels, self).__init__(data, reacID, index_col)
    def drop_duplicates(self):
        """Drop duplicated models, keeping :attr:`scores` aligned with rows."""
        # temporarily attach scores so identical models with different
        # scores are not considered duplicates
        self.df['score'] = self.scores
        self.df.drop_duplicates(inplace=True)
        self.scores = self.df['score']
        del self.df['score']
class FuzzyModels(Models):
    """Models produced by the fuzzy-logic formalism."""
    def __init__(self, data, reacID=None, index_col=None):
        super(FuzzyModels, self).__init__(data, reacID, index_col)
    def copy(self):
        """Return a new FuzzyModels built from this instance."""
        return FuzzyModels(self)
class BooleanModels(Models):
    """Class to read and plot models as exported by CASPO or CellNOptR.

    Models contains a dataframe with reactions as columns and models as rows.
    For each reaction, we can then obtain the average parameters.
    In the boolean case, a model stores values made of 0/1.

    Scores may be available. No sizes are stored; sizes can be extracted
    easily as the sum over rows (see :attr:`sizes`).
    ::

        >>> from cno.core.models import Models
        >>> m = Models()
        >>> m.plot() # average model, which can be obtained with m.get_average_model()
        >>> m.plot(model_number=0) # indices are m.df.index
        >>> m.plot(model_number=0) # indices are m.df.index

    .. note:: One difficulty is the way ANDs are coded in different software.
        In CASPO, the AND gate is coded as "A+B=C". Note that internally we
        use ^ especially in CNOGraph. Then, an AND edge is split in sub edges.
        So, A+B=C is made of 3 edges A -> A+B=C , B -> A+B=C and A+B=C -> C.
        This explains the weird code in :meth:`cno.io.cnograph.plot`.

    Provided plots:

    - average models with edges on/off
    - errorbars on edges sorted by average presence
    - heatmap of the models
    """
    def __init__(self, data, reacID=None, index_col=None):
        """
        If you have a first column which is not a reaction, set index_col to 0.

        .. todo:: values are 0/1 since we have bit strings but could be
            anything in other formalisms (e.g., ODE); how to handle those
            cases ?

        :param data: a filename with columns as the reactions and rows as
            parameters for each reaction (each row is therefore a model);
            may also be a dataframe or another Models instance.
        """
        super(BooleanModels, self).__init__(data, reacID, index_col)
    def get_cv_model(self):
        """Returns the coefficient of variation (std/mean) for each reaction."""
        res = self.df.std(axis=0)/self.df.mean(axis=0)
        # reactions never present have mean 0 -> NaN; report 0 instead
        res = res.fillna(0)
        return res
    def compute_average(self, model_number=None, tolerance=None):
        """Compute the average and update the cnograph accordingly.

        :param int model_number: model_number as shown by :attr:`df.index`;
            if not provided, the average is taken. The string 'cv' selects
            the coefficient-of-variation model instead.
        :param float tolerance: alternatively, average only the models whose
            score is within ``min * (1 + tolerance)``.
        """
        if model_number is None and tolerance is None:
            model = self.get_average_model()
        elif model_number == 'cv':
            model = self.get_cv_model()
        elif tolerance is not None:
            model = self.get_average_model(max_score = self.scores.min() * (1.+tolerance))
            if len(model) == 0:
                raise ValueError('No model found within that tolerance')
        else:
            model = self.df.ix[model_number]
        # This is to set the average and label and penwidth
        # TODO: could be simplified using Reaction ?
        # NOTE(review): 'and_symbol' and 'precision' are module-level names
        # defined elsewhere in this file (not visible in this chunk)
        for edge in self.cnograph.edges(data=True):
            link = edge[2]['link']
            if and_symbol not in edge[0] and and_symbol not in edge[1]:
                # plain edge: rebuild the reaction name to look up its value
                if link == "-" :
                    name = "!" + edge[0] + "=" + edge[1]
                else:
                    name = edge[0] + "=" + edge[1]
                value = model[name]
            elif and_symbol in edge[0]:
                # sub-edge of an AND gate: use the AND node's value
                value = model[edge[0]]
            elif and_symbol in edge[1]:
                value = model[edge[1]]
            else:
                raise ValueError()
            self.cnograph.edge[edge[0]][edge[1]]["label"] = precision(value)
            self.cnograph.edge[edge[0]][edge[1]]["average"] = precision(value)
            # if values are between 0 and 1; scale pen width by the maximum
            M = float(model.max())
            self.cnograph.edge[edge[0]][edge[1]]["penwidth"] = precision(value, 2) * 5/M
    def plot(self, model_number=None, cmap='gist_heat_r',
            colorbar=True, tolerance=True, filename=None, **kargs):
        """Plot the average model"""
        self.compute_average(model_number=model_number, tolerance=tolerance)
        self.cnograph.plot(edge_attribute="average", cmap=cmap,
            colorbar=colorbar, filename=filename, **kargs)
    def errorbar(self, tolerance=1e8, errorbar=True):
        """Plot the average presence of reactions over all models"""
        # NOTE(review): .ix is deprecated; the except branch falls back to
        # boolean-mask indexing on newer pandas versions
        try:
            df = self.df.ix[self.scores<=self.scores.min()*(1+tolerance)]
        except:
            df = self.df[(self.scores<=self.scores.min()*(1+tolerance)).values]
        mu = df.mean()
        mu.sort(inplace=True)
        sigma = df.std()
        # 'pylab' is imported at module level elsewhere in this file
        pylab.clf()
        X = range(0,len(mu.index))
        if errorbar is True:
            errorbar = 1
        else:
            errorbar = 0
        pylab.errorbar(X, mu.values, yerr=sigma.ix[mu.index].values*errorbar,
            marker='x', color='r', lw=0, elinewidth=2, ecolor='b')
        pylab.xticks(X, mu.index, rotation=90)
        pylab.title('')
        pylab.grid()
        pylab.ylim([-0.1, 1.1])
        #pylab.xlim([-0.5, len(X)+.5])
        pylab.tight_layout()
        return df
    def heatmap(self, num=1, transpose=False, cmap='gist_heat_r', heatmap_attr={}):
        """Plot a heatmap of the models (requires the biokit package).

        NOTE(review): heatmap_attr is a mutable default argument -- safe
        only because it is never mutated here.
        """
        #df = self.get_average_models()
        from biokit.viz.heatmap import Heatmap
        if transpose:
            df = self.df.transpose()
        else:
            df = self.df
        h = Heatmap(df)
        h.plot(cmap=cmap,num=num, **heatmap_attr)
        return h
    def __add__(self, other):
        # concatenate the two model sets, removing duplicated rows;
        # NOTE(review): scores are not carried over to the result
        import pandas as pd
        df = pd.concat([self.df, other.df])
        df.drop_duplicates(inplace=True)
        return Models(df)
    def __str__(self):
        txt = "Models contains {0} rows".format(len(self))
        return txt
    def copy(self):
        """Return a new BooleanModels built from this instance."""
        return BooleanModels(self)
    def _get_sizes(self):
        # model size = number of active reactions (sum of the 0/1 row)
        return self.df.sum(axis=1)
    sizes = property(_get_sizes)
    def drop_duplicates(self):
        """Drop duplicated models, keeping :attr:`scores` aligned with rows."""
        # attach scores so identical models with different scores are not
        # treated as duplicates
        self.df['score'] = self.scores
        self.df.drop_duplicates(inplace=True)
        self.scores = self.df['score']
        del self.df['score']
    def get_main_reactions(self, threshold=0.5):
        """Reactions present in more than *threshold* of the models."""
        reactions = list(self.df.columns[self.df.mean() > threshold])
        # normalise CellNOptR's '+' AND convention to '^'
        reactions = [x.replace('+','^') for x in reactions]
        return reactions
    def get_consensus_model(self, threshold=0.5):
        """Reactions present in more than *threshold* of the best-scoring
        models (i.e. models with a score equal to the minimum)."""
        df = self.df.ix[self.scores<=self.scores.min()*(1.)]
        reactions = list(df.mean()[df.mean() > threshold].index)
        return reactions
    def get_jaccard(self, progress=True):
        """Return the NxN matrix of pairwise Jaccard similarities between
        models (requires scikit-learn and easydev; 'np' is a module-level
        import elsewhere in this file)."""
        import sklearn.metrics
        N = len(self.df)
        J = np.zeros((N,N))
        from easydev import progress_bar
        pb = progress_bar(N)
        for ic, i in enumerate(self.df.index):
            for jc, j in enumerate(self.df.index):
                J[ic][jc] = sklearn.metrics.jaccard_similarity_score(self.df.ix[i], self.df.ix[j])
            pb.animate(1+ic)
        return J
class DTModels(BooleanModels):
    """Boolean models produced by the discrete-time (DT) formalism."""
    def __init__(self, data, reacID=None, index_col=None):
        super(DTModels, self).__init__(data, reacID, index_col)
    def copy(self):
        """Return a new DTModels built from this instance."""
        return DTModels(self)
class CompareTwoModels(object):
    """Compare two boolean models (e.g. two rows of a BooleanModels).

    :param m1: first model as a Pandas Series of 0/1,
        e.g. ``pd.Series(models.df.ix[0], dtype=int)``
    :param m2: second model; must share m1's index (the reaction names).

    The ``get_*`` methods return the reactions on in both models, in only
    one of them, or in neither; :meth:`plot_multigraph` draws all four
    categories on a single multi-edge graph.
    """
    def __init__(self, m1, m2):
        self.m1 = m1
        self.m2 = m2
        # both series must describe the same set of reactions
        assert all(self.m1.index == self.m2.index)
        self.midas = None
    def get_intersection(self):
        """Reactions active in both models."""
        return self.m1[np.logical_and(self.m1, self.m2)]
    def get_union(self):
        """Reactions active in at least one model."""
        return self.m1[np.logical_or(self.m1 , self.m2)]
    def get_both(self):
        """Alias for :meth:`get_intersection`."""
        return self.get_intersection()
    def get_m1_only(self):
        """Reactions active in m1 but not m2."""
        return self.m1[np.logical_and(self.m1==1, self.m2==0)]
    def get_m2_only(self):
        """Reactions active in m2 but not m1."""
        return self.m2[np.logical_and(self.m1==0, self.m2==1)]
    def get_both_off(self):
        """Reactions inactive in both models."""
        return self.m2[np.logical_and(self.m1==0, self.m2==0)]
    def _add_reactions(self, c, reactions, echo=False, **edge_attrs):
        # Add every edge of each reaction to graph *c* with the given
        # attributes; factored out of the four copy-pasted loops that
        # plot_multigraph previously contained.
        from cno import Reaction
        for reaction in reactions:
            if echo:
                sys.stdout.write(str(reaction))
            r = Reaction(reaction)
            r.sort()
            if echo:
                sys.stdout.write(str(c.reac2edges(r.name)))
            for this in c.reac2edges(r.name):
                try:
                    edge1, edge2, link = this
                except ValueError:
                    # 2-tuple edge: no explicit link, default to activation
                    edge1, edge2 = this
                    link = "+"
                c.add_edge(edge1, edge2, link=link, **edge_attrs)
    def plot_multigraph(self, cmap='jet'):
        """Plot both models on one graph: black=both, red=m1 only,
        green=m2 only, gray=off in both. Experimental."""
        sys.stdout.write('plot_multigraph may not work as expected. Experimental')
        from cno.io.multigraph import CNOGraphMultiEdges
        c = CNOGraphMultiEdges()
        c.midas = self.midas
        self._add_reactions(c, self.get_both().index, echo=True,
                            edgecolor=.1, color='black', penwidth=6,
                            label='both')
        self._add_reactions(c, self.get_m1_only().index,
                            edgecolor=.3, label='m1', color='red', penwidth=3)
        self._add_reactions(c, self.get_m2_only().index,
                            edgecolor=.5, label='m2', color='green',
                            penwidth=3)
        self._add_reactions(c, self.get_both_off().index,
                            edgecolor=.9, label='', arrowsize=0,
                            color='gray', penwidth=0)
        #c.plot(edge_attribute='edgecolor', cmap=cmap)
        c.plot()
        return c
class MultiModels(object):
    """Placeholder for handling several model collections (not implemented)."""
    pass
| {
"content_hash": "aa917a2e21bff02a27ab5e95eb996c00",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 112,
"avg_line_length": 34.285420944558524,
"alnum_prop": 0.5578247589387315,
"repo_name": "cellnopt/cellnopt",
"id": "1ae32b248ebc6901c09b5bebe1b7265f2f7cd2ac",
"size": "17116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cno/core/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "11056"
},
{
"name": "JavaScript",
"bytes": "496"
},
{
"name": "Jupyter Notebook",
"bytes": "3748599"
},
{
"name": "Python",
"bytes": "845977"
}
],
"symlink_target": ""
} |
from django.conf.urls import url

from . import views

# URL configuration for the 'consultas' app.
app_name = 'consultas'

# Plain-list form: django.conf.urls.patterns() was deprecated in Django 1.8
# and removed in 1.10; a list of url() instances is the compatible form.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^info/(?P<empresa_cif>.+)$', views.info, name='info'),
]
| {
"content_hash": "87d3e81fd780dbe8a9832e10eeabd189",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 23,
"alnum_prop": 0.6260869565217392,
"repo_name": "dmsgago/ree",
"id": "70bcf78963dccf804bc2bb4504c80904c5e20cc4",
"size": "230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "consultas/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7949"
},
{
"name": "HTML",
"bytes": "4618"
},
{
"name": "JavaScript",
"bytes": "699"
},
{
"name": "Python",
"bytes": "19157"
},
{
"name": "Shell",
"bytes": "1564"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
import logging
import os
import threading
import pykka
import tornado.httpserver
import tornado.ioloop
import tornado.netutil
import tornado.web
import tornado.websocket
from mopidy import exceptions, models, zeroconf
from mopidy.core import CoreListener
from mopidy.http import handlers
from mopidy.utils import encoding, formatting, network
logger = logging.getLogger(__name__)
class HttpFrontend(pykka.ThreadingActor, CoreListener):
    """Mopidy frontend actor that owns the HTTP/WebSocket server thread."""
    # class-level registries, populated by extensions before the actor starts
    apps = []
    statics = []
    def __init__(self, config, core):
        super(HttpFrontend, self).__init__()
        self.hostname = network.format_hostname(config['http']['hostname'])
        self.port = config['http']['port']
        tornado_hostname = config['http']['hostname']
        if tornado_hostname == '::':
            # '::' means "all interfaces"; tornado expects None for that
            tornado_hostname = None
        try:
            logger.debug('Starting HTTP server')
            sockets = tornado.netutil.bind_sockets(self.port, tornado_hostname)
            self.server = HttpServer(
                config=config, core=core, sockets=sockets,
                apps=self.apps, statics=self.statics)
        except IOError as error:
            # surface socket binding failures as a frontend startup error
            raise exceptions.FrontendError(
                'HTTP server startup failed: %s' %
                encoding.locale_decode(error))
        self.zeroconf_name = config['http']['zeroconf']
        self.zeroconf_http = None
        self.zeroconf_mopidy_http = None
    def on_start(self):
        """Actor start hook: run the server thread, publish zeroconf services."""
        logger.info(
            'HTTP server running at [%s]:%s', self.hostname, self.port)
        self.server.start()
        if self.zeroconf_name:
            self.zeroconf_http = zeroconf.Zeroconf(
                stype='_http._tcp', name=self.zeroconf_name,
                port=self.port)
            self.zeroconf_mopidy_http = zeroconf.Zeroconf(
                stype='_mopidy-http._tcp', name=self.zeroconf_name,
                port=self.port)
            self.zeroconf_http.publish()
            self.zeroconf_mopidy_http.publish()
    def on_stop(self):
        """Actor stop hook: unpublish zeroconf services, stop the server."""
        if self.zeroconf_http:
            self.zeroconf_http.unpublish()
        if self.zeroconf_mopidy_http:
            self.zeroconf_mopidy_http.unpublish()
        self.server.stop()
    def on_event(self, name, **data):
        # forward core events to the module-level WebSocket broadcaster
        on_event(name, **data)
def on_event(name, **data):
    """Broadcast a core event to all connected WebSocket clients as JSON."""
    # NOTE: intentionally reuses (and mutates) the keyword dict by adding
    # the 'event' key before serialising
    event = data
    event['event'] = name
    message = json.dumps(event, cls=models.ModelJSONEncoder)
    handlers.WebSocketHandler.broadcast(message)
class HttpServer(threading.Thread):
    """Thread that runs the tornado IOLoop serving Mopidy's HTTP frontend."""
    name = 'HttpServer'
    def __init__(self, config, core, sockets, apps, statics):
        super(HttpServer, self).__init__()
        self.config = config
        self.core = core
        self.sockets = sockets
        self.apps = apps
        self.statics = statics
        self.app = None
        self.server = None
    def run(self):
        """Thread body: build the tornado app and run the IOLoop until stopped."""
        self.app = tornado.web.Application(self._get_request_handlers())
        self.server = tornado.httpserver.HTTPServer(self.app)
        self.server.add_sockets(self.sockets)
        tornado.ioloop.IOLoop.instance().start()
        logger.debug('Stopped HTTP server')
    def stop(self):
        """Stop the IOLoop; safe to call from another thread via add_callback."""
        logger.debug('Stopping HTTP server')
        tornado.ioloop.IOLoop.instance().add_callback(
            tornado.ioloop.IOLoop.instance().stop)
    def _get_request_handlers(self):
        # Order matters: extension apps and statics are matched before the
        # default Mopidy routes.
        request_handlers = []
        request_handlers.extend(self._get_app_request_handlers())
        request_handlers.extend(self._get_static_request_handlers())
        request_handlers.extend(self._get_mopidy_request_handlers())
        logger.debug(
            'HTTP routes from extensions: %s',
            formatting.indent('\n'.join(
                '%r: %r' % (r[0], r[1]) for r in request_handlers)))
        return request_handlers
    def _get_app_request_handlers(self):
        # Mount each extension app under /<name>/, with a redirect for the
        # slash-less URL.
        result = []
        for app in self.apps:
            result.append((
                r'/%s' % app['name'],
                handlers.AddSlashHandler
            ))
            request_handlers = app['factory'](self.config, self.core)
            for handler in request_handlers:
                # prefix every route produced by the factory with the app name
                handler = list(handler)
                handler[0] = '/%s%s' % (app['name'], handler[0])
                result.append(tuple(handler))
            logger.debug('Loaded HTTP extension: %s', app['name'])
        return result
    def _get_static_request_handlers(self):
        # Serve each static extension's directory under /<name>/.
        result = []
        for static in self.statics:
            result.append((
                r'/%s' % static['name'],
                handlers.AddSlashHandler
            ))
            result.append((
                r'/%s/(.*)' % static['name'],
                handlers.StaticFileHandler,
                {
                    'path': static['path'],
                    'default_filename': 'index.html'
                }
            ))
            logger.debug('Loaded static HTTP extension: %s', static['name'])
        return result
    def _get_mopidy_request_handlers(self):
        # Either default Mopidy or user defined path to files
        static_dir = self.config['http']['static_dir']
        if static_dir and not os.path.exists(static_dir):
            logger.warning(
                'Configured http/static_dir %s does not exist. '
                'Falling back to default HTTP handler.', static_dir)
            static_dir = None
        if static_dir:
            # user-provided web root served at /
            return [(r'/(.*)', handlers.StaticFileHandler, {
                'path': self.config['http']['static_dir'],
                'default_filename': 'index.html',
            })]
        else:
            # no user dir: redirect root to the built-in /mopidy/ webclient
            return [(r'/', tornado.web.RedirectHandler, {
                'url': '/mopidy/',
                'permanent': False,
            })]
| {
"content_hash": "6d5526e718b3e99cfd6a230a718e4947",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 79,
"avg_line_length": 31.62087912087912,
"alnum_prop": 0.5716768027801912,
"repo_name": "woutervanwijk/mopidy",
"id": "57e2f46a274a609ee6866dc1304f8911bf97dc03",
"size": "5755",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "mopidy/http/actor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "610"
},
{
"name": "JavaScript",
"bytes": "74911"
},
{
"name": "Python",
"bytes": "925399"
}
],
"symlink_target": ""
} |
import sys
import guestfs
import tempfile
import hivex
def create_merge_file(path):
    """Create mergeide.reg with the link to the proper controlset.

    The original file points to a CurrentControlSet key in the registry.
    However, that key is not present when the guest is not running.
    This function creates a mergeide.reg file with the proper controlset
    value.

    :param path: path to the guest disk image to inspect.
    """
    g = mount(path)
    hive = download_system_hive(g)
    cset = get_current_control_set(hive)
    # render the template (read from the current working directory; expects
    # a %(cset)s placeholder) into mergeide.reg
    template = None
    with open('mergeide.reg.template') as f:
        template = f.read()
    with open('mergeide.reg', 'w') as f:
        f.write(template % {'cset':cset})
    # NOTE(review): the guestfs handle 'g' is never closed -- confirm whether
    # that matters for a read-only mount
def mount(path):
    """Mount the Windows guest image at *path* read-only and return the
    guestfs handle; the root device is stored on ``g.__root``.

    :raises Exception: if no operating system is found in the image, or the
        detected guest is not Windows.
    """
    g = guestfs.GuestFS()
    g.add_drive_opts(path, readonly=True)
    g.launch()
    roots = g.inspect_os()
    if len(roots) == 0:
        raise Exception("%s: no operating system found in the disk image" % (path))
    root = roots[0]
    if g.inspect_get_type(root) != "windows":
        # bug fix: the %s placeholder previously never received its argument
        raise Exception("%s disk: not a Windows guest" % (path))
    # mount image read only
    g.mount_ro(root, "/")
    g.__root = root
    return g
def download_system_hive(g):
    """Download the Windows SYSTEM registry hive from guest *g* and return
    it as a :class:`hivex.Hivex` object.

    :param g: a launched guestfs handle as returned by :func:`mount`.
    :raises Exception: if the Windows system config directory is not found.
    """
    import os
    system_root = g.inspect_get_windows_systemroot(g.__root)
    system_config_path = g.case_sensitive_path("%s/system32/config" % system_root)
    if not system_config_path:
        raise Exception("Ups. Couldn't locate Windows system config dir")
    system_path = g.case_sensitive_path("%s/system" % system_config_path)
    fhandle,local_system_hive_path = tempfile.mkstemp('system_hive')
    # close the OS-level descriptor from mkstemp to avoid leaking it;
    # guestfs writes to the path, not the handle
    os.close(fhandle)
    g.download(system_path, local_system_hive_path)
    system_hive = hivex.Hivex(local_system_hive_path)
    return system_hive
def get_current_control_set(system_hive):
    """Return the name ("ControlSetNNN") of the control set currently in use.

    Reads HKLM\\SYSTEM\\Select\\Current from the given hive; see
    http://support.microsoft.com/kb/100010 for background on Windows
    registry control sets.
    """
    select_node = system_hive.node_get_child(system_hive.root(), "Select")
    current = system_hive.node_get_value(select_node, "Current")
    return "ControlSet%03d" % system_hive.value_dword(current)
if __name__ == '__main__':
    # CLI entry point: expects the guest disk image path as sole argument
    if len(sys.argv) < 2:
        sys.exit("usage: mergeide_create_reg.py <image>")
    create_merge_file(sys.argv[1])
| {
"content_hash": "bdcc643070de8dd4e33a01c0a9749b35",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 83,
"avg_line_length": 31.04054054054054,
"alnum_prop": 0.6569438397910318,
"repo_name": "jakobadam/kvm-mergeide",
"id": "3f4c25638f258f5c3846a71520b5b6d3ce05708b",
"size": "2319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mergeide_create_reg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2319"
},
{
"name": "Shell",
"bytes": "1056"
}
],
"symlink_target": ""
} |
from org.python.util import PythonInterpreter
# Jython (Python 2) script: 'recurse' and 'terminal' are injected into the
# namespace by the embedding Java code before this file is executed.
print recurse
pi = PythonInterpreter()
pi.set("terminal", terminal)
# spawn a nested interpreter re-running this same script with an
# incremented counter -- NOTE(review): no base case, recursion is unbounded
pi.set("recurse", recurse + 1)
pi.execfile('/media/truecrypt1/code/java/Hakd/python/programs/recursive.py')
| {
"content_hash": "49947a6a6c97b3c9c008d8afb1de58e1",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 25,
"alnum_prop": 0.7733333333333333,
"repo_name": "Rsgm/Hakd",
"id": "fccd601d138b70bfee634e0e708254b277c36022",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/assets/python/programs/rsgm/recursive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "738"
},
{
"name": "Java",
"bytes": "260889"
},
{
"name": "Python",
"bytes": "28270"
}
],
"symlink_target": ""
} |
import os
import sys
# In ORT Package the symbolic_shape_infer.py is in ../tools
# Make symbolic_shape_infer importable from either the packaged layout
# (../tools) or the source layout (..).
file_path = os.path.dirname(__file__)
if os.path.exists(os.path.join(file_path, "../tools/symbolic_shape_infer.py")):
    sys.path.append(os.path.join(file_path, '../tools'))
else:
    sys.path.append(os.path.join(file_path, '..'))
from symbolic_shape_infer import *
class SymbolicShapeInferenceHelper(SymbolicShapeInference):
    """Helper around SymbolicShapeInference that runs inference on a private
    copy of an ONNX model, resolving named dynamic axes (e.g. "batch_size")
    to the concrete integers supplied at :meth:`infer` time.
    """
    def __init__(self, model, verbose=0, int_max=2**31 - 1, auto_merge=True, guess_output_rank=False):
        super().__init__(int_max, auto_merge, guess_output_rank, verbose)
        # work on a copy so the caller's model is never mutated
        self.model_ = onnx.ModelProto()
        self.model_.CopyFrom(model)
        self.all_shapes_inferred_ = False
        self.inferred_ = False

    # The goal is to remove dynamic_axis_mapping
    def infer(self, dynamic_axis_mapping):
        """Run shape inference; later calls return the cached result.

        :param dynamic_axis_mapping: e.g. ``{"batch_size": 4, "seq_len": 7}``
        :return: True if all shapes were inferred.
        """
        if self.inferred_:
            return self.all_shapes_inferred_
        self.dynamic_axis_mapping_ = dynamic_axis_mapping
        self._preprocess(self.model_)
        while self.run_:
            self.all_shapes_inferred_ = self._infer_impl()
        self.inferred_ = True
        return self.all_shapes_inferred_

    # override _preprocess() to avoid unnecessary model copy since ctor
    # already copied the model
    def _preprocess(self, in_mp):
        self.out_mp_ = in_mp
        self.initializers_ = dict((i.name, i) for i in self.out_mp_.graph.initializer)
        self.known_vi_ = dict((i.name, i) for i in list(self.out_mp_.graph.input))
        self.known_vi_.update(
            dict((i.name, helper.make_tensor_value_info(i.name, i.data_type, list(i.dims)))
                 for i in self.out_mp_.graph.initializer))

    # Override _get_sympy_shape() in symbolic_shape_infer.py to ensure shape
    # inference succeeds by substituting the actual value of dynamic axes
    def _get_sympy_shape(self, node, idx):
        sympy_shape = []
        for d in self._get_shape(node, idx):
            if isinstance(d, str):
                if d in self.dynamic_axis_mapping_:
                    # named dynamic axis with a user-supplied concrete value
                    sympy_shape.append(self.dynamic_axis_mapping_[d])
                elif d in self.symbolic_dims_:
                    sympy_shape.append(self.symbolic_dims_[d])
                else:
                    sympy_shape.append(sympy.Symbol(d, integer=True))
            else:
                assert d is not None
                sympy_shape.append(d)
        return sympy_shape

    def get_edge_shape(self, edge):
        """Return the (resolved) shape of *edge*, or None if unknown."""
        assert self.all_shapes_inferred_
        if edge not in self.known_vi_:
            print("Cannot retrieve the shape of " + str(edge))
            return None
        type_proto = self.known_vi_[edge].type
        shape = get_shape_from_type_proto(type_proto)
        # substitute named dynamic axes with their concrete values
        for i, d in enumerate(shape):
            if isinstance(d, str) and d in self.dynamic_axis_mapping_:
                shape[i] = self.dynamic_axis_mapping_[d]
        return shape

    def compare_shape(self, edge, edge_other):
        """Return True if both edges have identical resolved shapes.

        :raises Exception: if either shape is unknown.
        """
        assert self.all_shapes_inferred_
        shape = self.get_edge_shape(edge)
        shape_other = self.get_edge_shape(edge_other)
        if shape is None or shape_other is None:
            raise Exception("At least one shape is missed for edges to compare")
        return shape == shape_other
| {
"content_hash": "a99d34d38de3b121366b379c13866913",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 129,
"avg_line_length": 42,
"alnum_prop": 0.6024713682941532,
"repo_name": "ryfeus/lambda-packs",
"id": "1aaa7b120678ba9785603d78fdf5c5269e4b1db0",
"size": "3565",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ONNX/lambda-onnx/onnxruntime/transformers/shape_infer_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
import base64
import copy
import json
import isodate
from datetime import datetime
from pyld import jsonld
import pytz
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.exceptions import InvalidSignature
# Inlined copy of the JSON-LD security context (https://w3id.org/security/v1)
# so signing/verification works without fetching it over the network.
SECURITY_CONTEXT_URL = 'https://w3id.org/security/v1'
SECURITY_CONTEXT = {
    "@context": {
        "id": "@id",
        "type": "@type",
        "dc": "http://purl.org/dc/terms/",
        "sec": "https://w3id.org/security#",
        "xsd": "http://www.w3.org/2001/XMLSchema#",
        "EcdsaKoblitzSignature2016": "sec:EcdsaKoblitzSignature2016",
        "EncryptedMessage": "sec:EncryptedMessage",
        "GraphSignature2012": "sec:GraphSignature2012",
        "LinkedDataSignature2015": "sec:LinkedDataSignature2015",
        "LinkedDataSignature2016": "sec:LinkedDataSignature2016",
        "CryptographicKey": "sec:Key",
        "authenticationTag": "sec:authenticationTag",
        "canonicalizationAlgorithm": "sec:canonicalizationAlgorithm",
        "cipherAlgorithm": "sec:cipherAlgorithm",
        "cipherData": "sec:cipherData",
        "cipherKey": "sec:cipherKey",
        "created": {"@id": "dc:created", "@type": "xsd:dateTime"},
        "creator": {"@id": "dc:creator", "@type": "@id"},
        "digestAlgorithm": "sec:digestAlgorithm",
        "digestValue": "sec:digestValue",
        "domain": "sec:domain",
        "encryptionKey": "sec:encryptionKey",
        "expiration": {"@id": "sec:expiration", "@type": "xsd:dateTime"},
        "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"},
        "initializationVector": "sec:initializationVector",
        "iterationCount": "sec:iterationCount",
        "nonce": "sec:nonce",
        "normalizationAlgorithm": "sec:normalizationAlgorithm",
        "owner": {"@id": "sec:owner", "@type": "@id"},
        "password": "sec:password",
        "privateKey": {"@id": "sec:privateKey", "@type": "@id"},
        "privateKeyPem": "sec:privateKeyPem",
        "publicKey": {"@id": "sec:publicKey", "@type": "@id"},
        "publicKeyPem": "sec:publicKeyPem",
        "publicKeyService": {"@id": "sec:publicKeyService", "@type": "@id"},
        "revoked": {"@id": "sec:revoked", "@type": "xsd:dateTime"},
        "salt": "sec:salt",
        "signature": "sec:signature",
        "signatureAlgorithm": "sec:signingAlgorithm",
        "signatureValue": "sec:signatureValue"}}

# Inlined copy of the JSON-LD identity context (https://w3id.org/identity/v1).
IDENTITY_CONTEXT_URL = 'https://w3id.org/identity/v1'
IDENTITY_CONTEXT = {
    "@context": {
        "id": "@id",
        "type": "@type",
        "cred": "https://w3id.org/credentials#",
        "dc": "http://purl.org/dc/terms/",
        "identity": "https://w3id.org/identity#",
        "perm": "https://w3id.org/permissions#",
        "ps": "https://w3id.org/payswarm#",
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        "sec": "https://w3id.org/security#",
        "schema": "http://schema.org/",
        "xsd": "http://www.w3.org/2001/XMLSchema#",
        "Group": "https://www.w3.org/ns/activitystreams#Group",
        "claim": {"@id": "cred:claim", "@type": "@id"},
        "credential": {"@id": "cred:credential", "@type": "@id"},
        "issued": {"@id": "cred:issued", "@type": "xsd:dateTime"},
        "issuer": {"@id": "cred:issuer", "@type": "@id"},
        "recipient": {"@id": "cred:recipient", "@type": "@id"},
        "Credential": "cred:Credential",
        "CryptographicKeyCredential": "cred:CryptographicKeyCredential",
        "about": {"@id": "schema:about", "@type": "@id"},
        "address": {"@id": "schema:address", "@type": "@id"},
        "addressCountry": "schema:addressCountry",
        "addressLocality": "schema:addressLocality",
        "addressRegion": "schema:addressRegion",
        "comment": "rdfs:comment",
        "created": {"@id": "dc:created", "@type": "xsd:dateTime"},
        "creator": {"@id": "dc:creator", "@type": "@id"},
        "description": "schema:description",
        "email": "schema:email",
        "familyName": "schema:familyName",
        "givenName": "schema:givenName",
        "image": {"@id": "schema:image", "@type": "@id"},
        "label": "rdfs:label",
        "name": "schema:name",
        "postalCode": "schema:postalCode",
        "streetAddress": "schema:streetAddress",
        "title": "dc:title",
        "url": {"@id": "schema:url", "@type": "@id"},
        "Person": "schema:Person",
        "PostalAddress": "schema:PostalAddress",
        "Organization": "schema:Organization",
        "identityService": {"@id": "identity:identityService", "@type": "@id"},
        "idp": {"@id": "identity:idp", "@type": "@id"},
        "Identity": "identity:Identity",
        "paymentProcessor": "ps:processor",
        "preferences": {"@id": "ps:preferences", "@type": "@vocab"},
        "cipherAlgorithm": "sec:cipherAlgorithm",
        "cipherData": "sec:cipherData",
        "cipherKey": "sec:cipherKey",
        "digestAlgorithm": "sec:digestAlgorithm",
        "digestValue": "sec:digestValue",
        "domain": "sec:domain",
        "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"},
        "initializationVector": "sec:initializationVector",
        "member": {"@id": "schema:member", "@type": "@id"},
        "memberOf": {"@id": "schema:memberOf", "@type": "@id"},
        "nonce": "sec:nonce",
        "normalizationAlgorithm": "sec:normalizationAlgorithm",
        "owner": {"@id": "sec:owner", "@type": "@id"},
        "password": "sec:password",
        "privateKey": {"@id": "sec:privateKey", "@type": "@id"},
        "privateKeyPem": "sec:privateKeyPem",
        "publicKey": {"@id": "sec:publicKey", "@type": "@id"},
        "publicKeyPem": "sec:publicKeyPem",
        "publicKeyService": {"@id": "sec:publicKeyService", "@type": "@id"},
        "revoked": {"@id": "sec:revoked", "@type": "xsd:dateTime"},
        "signature": "sec:signature",
        "signatureAlgorithm": "sec:signatureAlgorithm",
        "signatureValue": "sec:signatureValue",
        "CryptographicKey": "sec:Key",
        "EncryptedMessage": "sec:EncryptedMessage",
        "GraphSignature2012": "sec:GraphSignature2012",
        "LinkedDataSignature2015": "sec:LinkedDataSignature2015",
        "accessControl": {"@id": "perm:accessControl", "@type": "@id"},
        "writePermission": {"@id": "perm:writePermission", "@type": "@id"}
    }
}
# Shorthands for pyld's JsonLdProcessor static helpers.
_get_values = jsonld.JsonLdProcessor.get_values

def _get_value(obj, key):
    """Return the first value for *key* in *obj*, raising KeyError if absent."""
    try:
        return _get_values(obj, key)[0]
    # A bit more accurate since we're trying to pull a value out of a specific
    # key, and nothing exists for this one
    except IndexError:
        raise KeyError(key)

_has_value = jsonld.JsonLdProcessor.has_value
def _make_simple_loader(url_map, load_unknown_urls=True,
                        cache_externally_loaded=True):
    """Build a pyld documentLoader backed by an in-memory URL map.

    :param url_map: mapping of URL -> already-parsed JSON-LD document.
    :param load_unknown_urls: when True, URLs missing from the map are
        fetched with jsonld.load_document; otherwise a JsonLdError is raised.
    :param cache_externally_loaded: when True (default), documents fetched
        for unknown URLs are cached for later calls. (This flag was
        previously accepted but ignored; caching was unconditional.)
    """
    def _make_context(url, doc):
        # wrap in the structure pyld expects from a documentLoader
        return {
            "contextUrl": None,
            "documentUrl": url,
            "document": doc}

    _url_map = {
        url: _make_context(url, doc)
        for url, doc in url_map.items()}

    def loader(url):
        if url in _url_map:
            return _url_map[url]
        elif load_unknown_urls:
            doc = jsonld.load_document(url)
            # parse eagerly so callers always receive a dict
            # @@: Is this optimization safe in all cases?
            if isinstance(doc["document"], str):
                doc["document"] = json.loads(doc["document"])
            if cache_externally_loaded:
                _url_map[url] = doc
            return doc
        else:
            raise jsonld.JsonLdError(
                "url not found and loader set to not load unknown URLs.",
                {'url': url})
    return loader
# Default loader, preloaded with the inlined security and identity contexts.
_security_context_loader = _make_simple_loader(
    {SECURITY_CONTEXT_URL: SECURITY_CONTEXT,
     IDENTITY_CONTEXT_URL: IDENTITY_CONTEXT})
# @@: Shouldn't this be a mapping from these names to their actual
# functionality? Seems kludgy to have all these if-elif-else things
# as interspersed through the document...
# Okay, answer is yes
# TODO: Make these JsonLdErrors
# class LdsError(jsonld.JsonLdError): pass
# class LdsTypeError(LdsError, TypeError): pass
# Base error for linked-data-signature operations.
class LdsError(Exception): pass
# Type-related signature error; also catchable as a plain TypeError.
class LdsTypeError(LdsError, TypeError): pass
def is_valid_uri(obj):
    """
    Check to see if OBJ is a valid URI.

    Best-effort only: requires a string that contains at least one ':'
    character (i.e. something that looks like it has a scheme).
    """
    if not isinstance(obj, str):
        return False
    return ":" in obj
def sign(document, options):
    """
    Signs a JSON-LD document using a digital signature.
    - document: the JSON-LD document to be signed.
    - options: options to use:
        [privateKeyPem] A PEM-encoded private key.
        [creator] the URL to the paired public key.
        [date] an optional date to override the signature date with.
               If provided, must have an "aware" timezone
               (.tzinfo not None)
        [domain] an optional domain to include in the signature.
        [nonce] an optional nonce to include in the signature.
        [algorithm] the algorithm to use, eg: 'GraphSignature2012',
          'LinkedDataSignature2015' (default: 'GraphSignature2012').

    Returns a deep copy of *document* with the signature node attached;
    the input document is not mutated.
    """
    options = copy.deepcopy(options)
    # TODO: The spec says privateKey, but in jsonld-signatures.js there are
    #   these two separate fields...
    # Default the signature date to "now" as a timezone-aware UTC datetime.
    options["date"] = options.get("date") or datetime.now(pytz.utc)
    options.setdefault("algorithm", "GraphSignature2012")
    if not options["algorithm"] in SUITES:
        raise LdsError(
            ("[jsig.sign] Unsupported algorithm '%s'; options.algorithm must "
             "be one of: %s") % (options["algorithm"], SUITES.keys()))
    suite = SUITES[options["algorithm"]]
    # Let the suite validate and normalize options (e.g. stringify the date).
    options = suite.signature_munge_verify_options(options)
    # @@: Do we need this in the sign thing?
    # sig_options carries only the fields that feed the signed data.
    sig_options = {
        "date": options["date"]
    }
    if "nonce" in options:
        sig_options["nonce"] = options["nonce"]
    if "domain" in options:
        sig_options["domain"] = options["domain"]
    formatted = suite.format_for_signature(document, sig_options, options)
    sig_val = suite.sign_formatted(formatted, options)
    # Build the signature node in the security-context vocabulary.
    signature = {
        "@context": SECURITY_CONTEXT_URL,
        "type": options["algorithm"],
        "creator": options["creator"],
        "created": options["date"],
        "signatureValue": sig_val}
    if "domain" in options:
        signature["domain"] = options["domain"]
    if "nonce" in options:
        signature["nonce"] = options["nonce"]
    # Compact the signature node into the document's own @context so the
    # attached key name matches the document's vocabulary.
    ctx = _get_values(document, "@context")
    compacted = jsonld.compact(
        {"https://w3id.org/security#signature": signature},
        ctx, options={
            "documentLoader": _security_context_loader})
    del compacted["@context"]
    output = copy.deepcopy(document)
    # @@: Wow, this seems like a terribly kludgy way to get that key,
    #   but that's what's done in jsonld-signatures.js.  I mean,
    #   I guess it should work.  I guess this is to avoid that the name may
    #   be either expanded or compacted at this point
    signature_key = list(compacted.keys())[0]
    # TODO: support multiple signatures.
    #   Same warning as in jsonld-signatures.js! ;P
    #   We could put this in the suite option?
    output[signature_key] = compacted[signature_key]
    return output
def _basic_rsa_signature(formatted, options):
    """Sign *formatted* (bytes) with RSASSA-PSS / SHA-256.

    - formatted: the canonicalized bytes to sign.
    - options: must contain "privateKeyPem", a PEM-encoded RSA private key.

    Returns the signature as a base64-encoded str.
    """
    key_pem = options["privateKeyPem"]
    # BUG FIX: sign()'s docstring says privateKeyPem is a PEM string, but
    # load_pem_private_key() requires bytes (the verify path already
    # encodes -- see _rsa_verify_sig).  Accept either str or bytes.
    if isinstance(key_pem, str):
        key_pem = key_pem.encode("utf-8")
    private_key = serialization.load_pem_private_key(
        key_pem,
        password=None,
        backend=default_backend())
    signed = private_key.sign(
        formatted,
        # PSS padding with MGF1/SHA-256 and maximum salt length, mirroring
        # the parameters used by _rsa_verify_sig.
        padding.PSS(
            mgf=padding.MGF1(hashes.SHA256()),
            salt_length=padding.PSS.MAX_LENGTH),
        hashes.SHA256())
    return base64.b64encode(signed).decode("utf-8")
def _getDataToHash_2012_2015(input, sig_options, options):
# TODO: These are two separate algorithms, so we should separate them
to_hash = ""
if options["algorithm"] == "GraphSignature2012":
if "nonce" in sig_options:
to_hash += sig_options["nonce"]
to_hash += sig_options["date"]
to_hash += input
if "domain" in sig_options:
to_hash += "@" + sig_options["domain"]
else:
headers = {
"http://purl.org/dc/elements/1.1/created": sig_options.get("date"),
"https://w3id.org/security#domain": sig_options.get("domain"),
"https://w3id.org/security#nonce": sig_options.get("nonce")};
# add headers in lexicographical order
for key in sorted(headers.keys()):
value = headers[key]
if value is not None:
to_hash += "%s: %s\n" % (key, value)
to_hash += input
return to_hash.encode("utf-8")
def _w3c_date(dt):
    """Format an aware datetime as a W3C/ISO-8601 string in UTC."""
    # We may need to convert it to UTC
    # NOTE(review): identity check against pytz.utc only short-circuits for
    # pytz tzinfo objects; other UTC tzinfos take the (harmless)
    # astimezone path.
    if dt.tzinfo is not pytz.utc:
        dt = dt.astimezone(pytz.utc)
    return isodate.datetime_isoformat(dt)
# Verification
def verify(signed_document, options):
    """
    Verifies the digital signature on a JSON-LD document.

    Args:
     - signed_document: the JSON-LD document to be verified.
     - options:
       # TODO: Not all these are implemented yet, and some may be algorithm
       # specific
       Options:
        - publicKey(signature, options): A procedure which, if present, is called
          to retrieve the public key.  Must do all validation that ownership
          correcly aligns.
        - checkNonce(nonce, options)] a procedure to check if the nonce (null
          if none) used in the signature is valid.
        - checkDomain(domain, options): a procedure to check if the domain used
          (null if none) is valid.
        - checkKey(key, options): a procedure to check if the key used to sign the
          message is trusted.
        - checkKeyOwner(owner, key, options): a procedure to check if the key's
          owner is trusted.
        - checkTimestamp: check signature timestamp (default: false).
        - maxTimestampDelta: signature must be created within a window of
          this many seconds (default: 15 minutes).
        - documentLoader(url): the document loader.
        - id the ID (full URL) of the node to check the signature of, if
          the input contains multiple signed nodes.

    Returns the boolean result of the suite's verify_formatted.
    """
    options = copy.copy(options)
    loader = options.get("documentLoader", _security_context_loader)
    options.setdefault("algorithm", "GraphSignature2012")
    # Here's a TODO copy-pasta'ed from jsonld-signatures.js:
    # TODO: frame before getting signature, not just compact? considerations:
    #   should the assumption be that the signature is on the top-level object
    #   and thus framing is unnecessary?
    # Compact into the security context so "signature"/"type" keys are
    # predictable regardless of the input document's own context.
    compacted = jsonld.compact(
        signed_document, SECURITY_CONTEXT_URL, options={
            "documentLoader": loader})
    try:
        signature = _get_values(compacted, "signature")[0]
    except IndexError:
        raise LdsError('[jsigs.verify] No signature found.')
    try:
        suite_name = _get_values(signature, "type")[0]
    except IndexError:
        suite_name = ""
    if not suite_name in SUITES:
        raise LdsError(
            ("[jsigs.verify] Unsupported signature algorithm \"%s\"; "
             "supported algorithms are: %s") % (suite_name,
                                                SUITES.keys()))
    suite = SUITES[suite_name]
    # TODO: Should we be framing here?  According to my talks with Dave Longley
    #   we probably should, though I don't know how well pyld supports framing
    #   and I need to wrap my head around it better
    # @@: So here we have to extract the signature
    # @@: 3 before 1 and 2?  Well we need it in 1 and 2 :P
    # SPEC (3): Remove any signature nodes from the default graph in
    #   document and save it as signature.
    # @@: This isn't recursive, should it be?  Also it just handles
    #   one value for now.
    # SPEC (2): Let document be a copy of signed document.
    document = copy.deepcopy(compacted)
    signature = document.pop("signature")
    # SPEC (1): Get the public key by dereferencing its URL identifier
    #   in the signature node of the default graph of signed document.
    # @@: Rest of SPEC(1) in _get_public_key
    get_public_key = options.get("publicKey", _get_public_key)
    public_key = get_public_key(signature, options)
    # SPEC (5): Create a value tbv that represents the data to be
    #   verified, and set it to the result of running the Create Verify
    #   Hash Algorithm, passing the information in signature.
    # TODO: This doesn't look like the same verification step
    #   being done in the signature step as ported from jsonld-signatures.js
    #   It looks like what step we do here should be farmed out depending
    #   on the signature suite used.
    # @@: Maybe sig_options should be munged by the suite?
    sig_options = {}
    if "publicKeyPem" in public_key:
        sig_options["publicKeyPem"] = _get_value(public_key, "publicKeyPem")
    if "publicKeyWif" in public_key:
        sig_options["publicKeyWif"] = _get_value(public_key, "publicKeyWif")
    if "nonce" in signature:
        sig_options["nonce"] = _get_value(signature, "nonce")
    if "domain" in signature:
        sig_options["domain"] = _get_value(signature, "domain")
    # @@: Why isn't this also "created"?
    sig_options["date"] = _get_value(signature, "created")
    tbv = suite.format_for_signature(document, sig_options, options)
    # SPEC (6): Pass the signatureValue, tbv, and the public key to
    #   the signature algorithm (e.g. JSON Web Signature using
    #   RSASSA-PKCS1-v1_5 algorithm).  Return the resulting boolean
    #   value.
    return suite.verify_formatted(signature, tbv, public_key, options)
def _get_public_key(signature, options):
    """Dereference and validate the public key named by signature.creator.

    Fetches the creator document, follows its publicKey link, and checks
    the bidirectional key<->owner relationship (plus the optional
    checkKeyOwner callback) before returning the compacted key document.

    Raises LdsError on any trust/linkage failure.
    """
    def _id_of(obj):
        # Accept either a bare IRI string or a node object.
        if isinstance(obj, str):
            return obj
        return obj.get("@id") or obj.get("id")
    creator_id = _id_of(_get_value(signature, "creator"))
    if not creator_id:
        raise LdsError(
            '[jsigs.verify] creator not found on signature.')
    creator = _get_security_compacted_jsonld(creator_id, options)
    if not "publicKey" in creator:
        raise LdsError(
            '[jsigs.verify] publicKey not found on creator object')
    # @@: What if it's a fragment identifier on an embedded object?
    public_key_id = _get_value(creator, "publicKey")
    public_key = _get_security_compacted_jsonld(
        public_key_id, options)
    owners = _get_values(public_key, "owner")
    # SPEC (1): Confirm that the linked data document that describes
    #   the public key specifies its owner and that its owner's URL
    #   identifier can be dereferenced to reveal a bi-directional link
    #   back to the key.
    if not creator_id in owners:
        raise LdsError(
            '[jsigs.verify] The public key is not owned by its declared owner.')
    # SPEC (1): Ensure that the key's owner is a trusted entity before
    #   proceeding to the next step.
    check_key_owner = options.get("checkKeyOwner")
    if check_key_owner and not check_key_owner(signature, public_key, options):
        raise LdsError(
            '[jsigs.verify] The owner of the public key is not trusted.')
    return public_key
def _security_compact(document, options):
    """Compact *document* into the security context (honoring any
    caller-supplied documentLoader)."""
    loader = options.get("documentLoader", _security_context_loader)
    return jsonld.compact(document, SECURITY_CONTEXT_URL,
                          options={"documentLoader": loader})
def _get_jsonld(id, options):
if isinstance(id, dict):
id = id.get("id") or id.get("@id")
if not id:
raise ValueError("Tried to fetch object with no id: %s" % id)
loader = options.get("documentLoader", _security_context_loader)
return loader(id)["document"]
def _get_security_compacted_jsonld(id, options):
    """Fetch *id*'s document and compact it into the security context."""
    return _security_compact(_get_jsonld(id, options), options)
# TODO: Are we actually passing in multiple aglgorithms for message
# canonicalization *and* message digest?
def create_verify_hash(document, suite, options,
                       options_to_canonicalize):
    """Run the Create Verify Hash Algorithm for *document*.

    - document: the (signature-stripped) document to hash.
    - suite: signature suite providing normalize_jsonld/message_digest.
    - options: library options (documentLoader etc.) passed to the suite.
    - options_to_canonicalize: the signature-option fields (nonce, domain,
      created, ...) to canonicalize alongside the document.

    Returns the final digest per SPEC steps (1)-(6) below.
    """
    normalized_input = suite.normalize_jsonld(document, options)
    # SPEC (1): Let options be a copy of input options.
    options_to_canonicalize = copy.deepcopy(options_to_canonicalize)
    # SPEC (2): If type, id, or signatureValue exists in options,
    #   remove the entry.
    # @@: Well since we're specifically passing these in to this procedure
    #   I guess we don't need to do that...
    # SPEC (3): If created does not exist in options, add an entry
    #   with a value that is an ISO8601 combined date and time string
    #   containing the current date and time accurate to at least one
    #   second, in Universal Time Code format.  For example:
    #   2017-11-13T20:21:34Z.
    if not "created" in options_to_canonicalize:
        options_to_canonicalize["created"] = _w3c_date(datetime.now(pytz.utc))
    # SPEC (4): Generate output by:
    # SPEC (4.1): Creating a canonicalized options document by
    #   canonicalizing options according to the canonicalization
    #   algorithm (e.g. the GCA2015 [RDF-DATASET-NORMALIZATION]
    #   algorithm).
    # Well, we need to add the context first:
    options_to_canonicalize["@context"] = SECURITY_CONTEXT_URL
    canonical_options = suite.normalize_jsonld(
        options_to_canonicalize, options)
    # SPEC (4.2): Hash canonicalized options document using the
    #   message digest algorithm (e.g. SHA-256) and set output to the
    #   result.
    output = suite.message_digest(canonical_options, options)
    # SPEC (4.3): Hash canonicalized document using the message digest
    #   algorithm (e.g. SHA-256) and append it to output.
    output += suite.message_digest(normalized_input, options)
    # SPEC (5): Hash output using the message digest algorithm
    #   (e.g. SHA-256) and replace it with the result.
    output = suite.message_digest(output, options)
    # SPEC (6): Return output.
    return output
def _rsa_verify_sig(sig_value, formatted, public_key_jsonld):
    """
    Verify an RSASSA-PSS / SHA-256 signature.

    - sig_value: the base64-encoded signature string from the signature node
    - formatted: the bytes that were signed (the data "to be verified")
    - public_key_jsonld: JSON-LD object carrying the signer's publicKeyPem

    Returns True if the signature is valid, False otherwise.
    """
    # TODO: Support other formats than just PEM
    public_key = serialization.load_pem_public_key(
        _get_value(public_key_jsonld, "publicKeyPem").encode("utf-8"),
        backend=default_backend())
    try:
        # Padding parameters mirror those used when signing
        # (_basic_rsa_signature): PSS with MGF1/SHA-256, max salt length.
        public_key.verify(
            base64.b64decode(sig_value.encode("utf-8")), formatted,
            padding.PSS(
                mgf=padding.MGF1(hashes.SHA256()),
                salt_length=padding.PSS.MAX_LENGTH),
            hashes.SHA256())
        return True
    except InvalidSignature:
        return False
# In the future, we'll be doing a lot more work based on what suite is
# selected.
def signature_common_munge_verify(options):
    """Validate and normalize the options shared by all signature suites.

    Requires a URI-shaped "creator"; requires "domain"/"nonce" (when
    present) to be strings; stringifies a datetime "date" via _w3c_date.
    Returns the (possibly updated) options dict.
    """
    if not is_valid_uri(options["creator"]):
        raise LdsTypeError(
            "[jsig.sign] options.creator must be a URL string.")
    # BUG FIX: domain and nonce only need to be strings (as the error
    # messages themselves say, matching jsonld-signatures.js); requiring a
    # ':' via is_valid_uri wrongly rejected ordinary nonces/domains.
    if "domain" in options and not isinstance(options["domain"], str):
        raise LdsTypeError(
            "[jsig.sign] options.domain must be a string.")
    if "nonce" in options and not isinstance(options["nonce"], str):
        raise LdsTypeError(
            "[jsig.sign] options.nonce must be a string.")
    if not isinstance(options["date"], str):
        # Datetimes are serialized to a W3C/ISO-8601 UTC string.
        options["date"] = _w3c_date(options["date"])
    return options
class SignatureSuite():
    """Abstract base class for Linked Data signature suites.

    Concrete suites (e.g. GraphSignature2012) override the classmethods
    below; sign()/verify() drive a suite entirely through this interface.
    """
    # Registered algorithm name, used as the key in SUITES.
    name = None

    @classmethod
    def signature_munge_verify_options(cls, options):
        # Shared option validation; suites override to layer on
        # algorithm-specific checks.
        options = signature_common_munge_verify(options)
        return options

    @classmethod
    def normalize_jsonld(cls, document, options):
        """Canonicalize *document* (e.g. to N-Quads)."""
        raise NotImplementedError()

    @classmethod
    def format_for_signature(cls, document, sig_options, options):
        """Produce the exact bytes to be signed/verified."""
        raise NotImplementedError()

    @classmethod
    def sign_formatted(cls, formatted, options):
        """Sign the formatted bytes; return the signature value."""
        raise NotImplementedError()

    @classmethod
    def verify_formatted(cls, signature, formatted, public_key_jsonld,
                         options):
        # BUG FIX: verify() invokes this as (signature, tbv, public_key,
        # options) -- matching the GraphSignature2012 override -- but the
        # old stub only accepted (formatted, options), so a suite that
        # forgot to override it failed with a TypeError instead of
        # NotImplementedError.
        raise NotImplementedError()
def _format_gs_2012_ld_2015(suite, document, sig_options, options):
    """Shared format_for_signature body for the 2012/2015 suites:
    canonicalize the document, then splice in the signature options.

    Raises LdsError when canonicalization yields nothing (usually a
    missing @context leaving every term undefined).
    """
    normalized = suite.normalize_jsonld(document, options)
    if len(normalized) == 0:
        raise LdsError(
            ('[jsig.sign] '
             'The data to sign is empty. This error may be because a '
             '"@context" was not supplied in the input thereby causing '
             'any terms or prefixes to be undefined. '
             'Input: %s') % (json.dumps(document)))
    return _getDataToHash_2012_2015(normalized, sig_options, options)
class GraphSignature2012(SignatureSuite):
    """GraphSignature2012 suite: URGNA2012 canonicalization + RSA-PSS."""
    name = "GraphSignature2012"

    @classmethod
    def format_for_signature(cls, document, sig_options, options):
        # Delegates to the formatter shared with LinkedDataSignature2015.
        return _format_gs_2012_ld_2015(cls, document, sig_options, options)

    @classmethod
    def normalize_jsonld(self, document, options):
        # NOTE(review): first parameter is named "self" though this is a
        # classmethod -- it receives the class; rename to "cls" when safe.
        return jsonld.normalize(
            document,
            {"algorithm": "URGNA2012",
             "format": "application/nquads",
             "documentLoader": options.get("documentLoader",
                                           _security_context_loader)})

    @classmethod
    def sign_formatted(cls, formatted, options):
        return _basic_rsa_signature(formatted, options)

    @classmethod
    def verify_formatted(cls, signature, formatted, public_key_jsonld, options):
        return _rsa_verify_sig(
            _get_value(signature, "signatureValue"),
            formatted, public_key_jsonld)
class LinkedDataSignature2015(SignatureSuite):
    """LinkedDataSignature2015 suite: URDNA2015 canonicalization + RSA-PSS."""
    name = "LinkedDataSignature2015"

    @classmethod
    def normalize_jsonld(cls, document, options):
        return jsonld.normalize(
            document, {"algorithm": "URDNA2015",
                       "format": "application/nquads"})

    @classmethod
    def format_for_signature(cls, document, sig_options, options):
        # Delegates to the formatter shared with GraphSignature2012.
        return _format_gs_2012_ld_2015(cls, document, sig_options, options)

    @classmethod
    def sign_formatted(cls, formatted, options):
        return _basic_rsa_signature(formatted, options)

    @classmethod
    def verify_formatted(cls, signature, formatted, public_key_jsonld, options):
        # BUG FIX: this override was missing, so verify() on a
        # LinkedDataSignature2015 document fell through to the abstract
        # base (with the wrong arity) and crashed.  Verification mirrors
        # GraphSignature2012: both suites sign with _basic_rsa_signature.
        return _rsa_verify_sig(
            _get_value(signature, "signatureValue"),
            formatted, public_key_jsonld)
class EcdsaKoblitzSignature2016(SignatureSuite):
    """EcdsaKoblitzSignature2016 suite (currently disabled in SUITES;
    only option validation is implemented)."""
    name = "EcdsaKoblitzSignature2016"

    @classmethod
    def signature_munge_verify_options(cls, options):
        options = signature_common_munge_verify(options)
        # BUG FIX: the original called isinstance() with a single argument
        # -- isinstance(options.get("privateKeyWif", str)) -- which raises
        # TypeError unconditionally.  Validate the key material instead:
        # a WIF key when supplied, otherwise require a PEM key.
        if "privateKeyWif" in options:
            if not isinstance(options["privateKeyWif"], str):
                raise LdsTypeError(
                    "[jsig.sign] options.privateKeyWif must be a base 58 "
                    "formatted string.")
        elif not isinstance(options.get("privateKeyPem"), str):
            raise LdsTypeError(
                "[jsig.sign] options.privateKeyPem must be a PEM "
                "formatted string.")
        return options
# Placeholder suite: not yet implemented, and not registered in SUITES.
class LinkedDataSignature2016(SignatureSuite):
    name = "LinkedDataSignature2016"

# Registry mapping algorithm names to suite classes; sign()/verify()
# dispatch through this table.
SUITES = {
    s.name: s
    for s in [GraphSignature2012,
              LinkedDataSignature2015,
              # EcdsaKoblitzSignature2016,
              ]}
| {
"content_hash": "b6dd568495cd1d44ecd9f1451fc0d544",
"timestamp": "",
"source": "github",
"line_count": 718,
"max_line_length": 80,
"avg_line_length": 37.956824512534816,
"alnum_prop": 0.6263163688401276,
"repo_name": "Spec-Ops/pyld-signatures",
"id": "3afa87a38ca717d25cce3b644e240051b31af18b",
"size": "28940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyld_sig/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "39888"
},
{
"name": "Scheme",
"bytes": "1596"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
from __future__ import division
from subprocess import check_output
from math import log, sqrt, pow, e
from scipy import special
import numpy as np
import StringIO
import argparse
import time
import os
#Set the author information (printed verbatim by main()'s preamble)
__author__ = "Justin R. Klesmith"
__copyright__ = "Copyright 2016, Justin R. Klesmith"
__credits__ = ["Justin R. Klesmith", "Caitlin A. Kowalsky", "Timothy A. Whitehead"]
__license__ = "BSD-3"
__version__ = "2.2x, Build: 201607X"
__maintainer__ = "Justin R. Klesmith"
__email__ = ["klesmit3@msu.edu", "justinklesmith@gmail.com", "justinklesmith@evodyn.com"]
#Get commandline arguments
#Flags below are validated manually after parse_args(); several are only
#required for a particular normalization type (-g for growth, -d/-c for FACS).
parser = argparse.ArgumentParser(description='QuickNormalize '+__version__+' for Growth or FACS')
parser.add_argument('-n', dest='normtype', action='store', required=True, help='Normalization Type? Enter: growth or FACS')
parser.add_argument('-s', dest='startresidue', action='store', required=True, help='What is the start residue? ie: 0, 40, 80')
#parser.add_argument('-l', dest='length', action='store', required=True, help='Length of your tile? ie: 40, 80')
parser.add_argument('-g', dest='gp', action='store', help='How many doublings/generations? (GROWTH) ie: 12.5')
parser.add_argument('-d', dest='stddev', action='store', help='Standard Deviation? (FACS) ie: 0.6')
parser.add_argument('-c', dest='percentcollected', action='store', help='Percent Collected? (FACS) ie: 0.05')
parser.add_argument('-q', dest='heatfilename', action='store', help='Name for your heatmap')
parser.add_argument('-p', dest='path', action='store', required=True, help='What is the path to the enrich output directory? ie: ./tile/data/output/')
parser.add_argument('-t', dest='sigthreshold', action='store', nargs='?', const=1, default=5, help='Unselected counts for significance. Default = 5')
#parser.add_argument('-w', dest='wildtype', action='store', nargs='?', const=1, default='./WTSeq', help='File with the wild-type amino acid sequence. Default = ./WTSeq')
parser.add_argument('-o', dest='heatmap', action='store', nargs='?', const=1, default='True', help='Output a csv heatmap? Default = True')
parser.add_argument('-y', dest='ewtenrichment', action='store', help='Manual Ewt enrichment value')
parser.add_argument('-z', dest='eiscalar', action='store', help='Manual Ei enrichment scalar')
args = parser.parse_args()
#Verify inputs
#Each check prints a human-readable message and exits rather than raising.
if args.normtype != "growth" and args.normtype != "FACS" and args.normtype != "Plate1" and args.normtype != "Plate2":
    print "Missing normalization type. Flag: -n"
    quit()
if args.startresidue == None:
    print "Missing start residue. Flag: -s"
    quit()
#if args.length == None:
#print "Missing tile length. Flag: -l"
#quit()
if args.gp == None and args.normtype == "growth":
    print "Missing doublings. Flag: -g"
    quit()
if args.stddev == None and args.normtype == "FACS":
    print "Missing SD. Flag: -d"
    quit()
if args.percentcollected == None and args.normtype == "FACS":
    print "Missing percent collected. Flag: -c"
    quit()
if args.path == None:
    print "Missing Enrich output path. Flag: -p"
    quit()
if args.ewtenrichment and args.eiscalar != None:
    #This section is only true if we want to provide our own WT enrichment and a scalar to add to Ei
    OverrideEwtEi = True
    ManualEwt = float(args.ewtenrichment)
    EiScalar = float(args.eiscalar)
else:
    OverrideEwtEi = False
#Global Variables
#if os.path.isfile(args.wildtype):
#with open(args.wildtype, 'r') as infile: #Open the file with the wild-type protein sequence
#WTSeq = infile.readline() #Read the first line of the WT sequence file
#else:
#print "Wild-type sequence file not found...exit"
#quit()
StartResidue = int(args.startresidue) #Starting residue for your tile
SignificantThreshold = int(args.sigthreshold) #Number of counts in the unselected library and selected library to be significant
Path = args.path+"/data/output/" #What is the path to the output directory
ConfigPath = args.path+"/input/example_local_config" #Path to the config file
#Pull the wild-type protein sequence out of the Enrich config file.
with open(ConfigPath) as infile:
    for line in infile:
        if line.startswith("<wtPRO>"):
            Len = len(line)
            #NOTE(review): the 7 / Len-10 offsets assume a fixed
            #"<wtPRO>...</wtPRO>\n"-style line -- confirm against the
            #actual config format.
            WTSeq = line[7:Len-10]
TileLen = len(WTSeq)
if args.normtype == "growth":
    DoublingsGp = float(args.gp) #Number of doublings
if args.normtype == "FACS":
    SD = float(args.stddev) #Standard Deviation
    PC = float(args.percentcollected) #Percent collected
    THEOENRICHMENT = -log(PC, 2) #Theoretical maximum enrichment
#AA_Table = '*ACDEFGHIKLMNPQRSTVWY'
AA_Table = '*FWYPMILVAGCSTNQDEHKR' #Row order for the heatmap output
Mutations = {} #Mutations matrix
Ewt = None #Initialize the variable for the wildtype enrichment
UCwt = None #Unselected WT counts
SCwt = None #Selected WT counts
######################################################################################
#
#Main Program Functions
#
######################################################################################
######################################################################################
#Build_Matrix
#This does the initial population of the Mutations matrix that holds counts and
#enrichment values
######################################################################################
def Build_Matrix():
    """Populate the global Mutations matrix with empty per-cell records.

    Each cell is [RawLog2, Fitness, Unselected, Selected, Unused, WT-flag],
    keyed as Mutations[residue_index][amino_acid].
    """
    for position in xrange(0, TileLen):
        #setdefault replaces the original try/except KeyError dance
        row = Mutations.setdefault(position, {})
        for aa_entry in enumerate(AA_Table):
            #Mutations[ResID][MutID[1]][0 = RawLog2, 1 = Fitness, 2 = Unselected, 3 = Selected, 4=Unused, 5=WT]
            row[aa_entry[1]] = [None, None, None, None, None, False]
    return Mutations
######################################################################################
#Get_WT_Ewt
#This gets the wild-type enrichment from the enrich NA-NA output
######################################################################################
def Get_WT():
    """Read the wild-type (NA-NA row) enrichment and read counts from the
    Enrich output files, storing them in the globals Ewt, SCwt and UCwt.

    Tries the barcode ("_B_") file names first, then the read-1 ("_R1_")
    names; exits the program if neither exists.
    """
    global Ewt
    global UCwt
    global SCwt
    #Extract NA-NA WT Ewt log2
    awk = ""
    awk2 = ""
    awk3 = ""
    if os.path.isfile(Path+'ratios_sel_example_F_N_include_filtered_B_PRO_qc_unsel_example_F_N_include_filtered_B_PRO_qc'):
        awk = check_output(["awk", '{ print $5,$6,$8 }', Path+'ratios_sel_example_F_N_include_filtered_B_PRO_qc_unsel_example_F_N_include_filtered_B_PRO_qc'])
    elif os.path.isfile(Path+'ratios_sel_example_F_N_include_filtered_R1_PRO_qc_unsel_example_F_N_include_filtered_R1_PRO_qc'):
        awk = check_output(["awk", '{ print $5,$6,$8 }', Path+'ratios_sel_example_F_N_include_filtered_R1_PRO_qc_unsel_example_F_N_include_filtered_R1_PRO_qc'])
    else:
        print "Selected protein ratios file not found...exit"
        quit()
    if os.path.isfile(Path+'counts_sel_example_F_N_include_filtered_B_PRO_qc'):
        awk2 = check_output(["awk", '{ print $5,$6,$9 }', Path+'counts_sel_example_F_N_include_filtered_B_PRO_qc'])
    elif os.path.isfile(Path+'counts_sel_example_F_N_include_filtered_R1_PRO_qc'):
        awk2 = check_output(["awk", '{ print $5,$6,$9 }', Path+'counts_sel_example_F_N_include_filtered_R1_PRO_qc'])
    else:
        print "Sel protein counts file not found...exit"
        quit()
    if os.path.isfile(Path+'counts_unsel_example_F_N_include_filtered_B_PRO_qc'):
        awk3 = check_output(["awk", '{ print $5,$6,$9 }', Path+'counts_unsel_example_F_N_include_filtered_B_PRO_qc'])
    elif os.path.isfile(Path+'counts_unsel_example_F_N_include_filtered_R1_PRO_qc'):
        awk3 = check_output(["awk", '{ print $5,$6,$9 }', Path+'counts_unsel_example_F_N_include_filtered_R1_PRO_qc'])
    else:
        print "Unsel protein counts file not found...exit"
        quit()
    #Loop through the output: the "NA NA" location/identity row is wild-type
    for line in StringIO.StringIO(awk):
        split = line.split(" ")
        location = str(split[0])
        identity = str(split[1])
        if location == "NA" and identity == "NA":
            Ewt = float(split[2].rstrip('\n'))
            print "Wild-type log2 (Ewt): "+str(Ewt)
    #Loop through the output
    for line in StringIO.StringIO(awk2):
        split = line.split(" ")
        location = str(split[0])
        identity = str(split[1])
        if location == "NA" and identity == "NA":
            SCwt = int(split[2].rstrip('\n'))
            print "Selected wild-type counts: "+str(SCwt)
    #Loop through the output
    for line in StringIO.StringIO(awk3):
        split = line.split(" ")
        location = str(split[0])
        identity = str(split[1])
        if location == "NA" and identity == "NA":
            UCwt = int(split[2].rstrip('\n'))
            print "Unselected wild-type counts: "+str(UCwt)
    return
######################################################################################
#Get_Mut_Ei
#This gets the enrichment of each mutation from the enrich output
######################################################################################
def Get_Mut_Ei():
    """Read each single mutant's raw log2 enrichment (Ei) from the Enrich
    .m1 ratios file into Mutations[location][identity][0].

    For FACS runs the enrichment is capped just below the theoretical
    maximum (optionally shifted by the manual EiScalar override).
    """
    #Extract Mut Ei log2
    awk = ""
    if os.path.isfile(Path+'ratios_sel_example_F_N_include_filtered_B_PRO_qc_unsel_example_F_N_include_filtered_B_PRO_qc.m1'):
        awk = check_output(["awk", 'FNR>1{ print $5,$6,$8 }', Path+'ratios_sel_example_F_N_include_filtered_B_PRO_qc_unsel_example_F_N_include_filtered_B_PRO_qc.m1'])
    elif os.path.isfile(Path+'ratios_sel_example_F_N_include_filtered_R1_PRO_qc_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'):
        awk = check_output(["awk", 'FNR>1{ print $5,$6,$8 }', Path+'ratios_sel_example_F_N_include_filtered_R1_PRO_qc_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'])
    else:
        print "Selected protein ratios .m1 file not found...exit"
        quit()
    #Loop through the output
    for line in StringIO.StringIO(awk):
        split = line.split(" ")
        location = int(split[0])
        identity = str(split[1])
        Ei = float(split[2].rstrip('\n'))
        #Check to see if we're above the tile length and go to next
        if location >= TileLen:
            continue
        #For FACS set a upper limit on enrichment, don't do anything for growth
        if args.normtype == "FACS":
            #Check to see if the enrichment is greater or equal than the theoretical
            if OverrideEwtEi == False: #Apply no scalar to the Ei
                if Ei >= THEOENRICHMENT:
                    Mutations[location][identity][0] = (THEOENRICHMENT - 0.001)
                else:
                    Mutations[location][identity][0] = Ei
            elif OverrideEwtEi == True: #Apply a scalar to the Ei
                if Ei >= (THEOENRICHMENT + EiScalar):
                    Mutations[location][identity][0] = ((THEOENRICHMENT + EiScalar) - 0.001)
                else:
                    Mutations[location][identity][0] = (Ei + EiScalar)
        else:
            Mutations[location][identity][0] = Ei
    return Mutations
######################################################################################
#Get_Unsel_Counts
#This gets the unselected counts for each mutation
######################################################################################
def Get_Unsel_Counts():
    """Read each mutant's unselected-library read count from the Enrich
    .m1 counts file into Mutations[location][identity][2]."""
    #Get the unselected counts for a variant
    awk = ""
    if os.path.isfile(Path+'counts_unsel_example_F_N_include_filtered_B_PRO_qc.m1'):
        awk = check_output(["awk", 'FNR>1{ print $5,$6,$9 }', Path+'counts_unsel_example_F_N_include_filtered_B_PRO_qc.m1'])
    elif os.path.isfile(Path+'counts_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'):
        awk = check_output(["awk", 'FNR>1{ print $5,$6,$9 }', Path+'counts_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'])
    else:
        print "Unselected protein counts .m1 file not found...exit"
        quit()
    #Loop through the output
    for line in StringIO.StringIO(awk):
        split = line.split(" ")
        location = int(split[0])
        identity = str(split[1])
        counts = int(split[2].rstrip('\n'))
        #Check to see if we're above the tile length and go to next
        if location >= TileLen:
            continue
        Mutations[location][identity][2] = counts #Set the unselected counts
    return Mutations
######################################################################################
#Get_Sel_Counts
#This gets the selected counts for each mutation
######################################################################################
def Get_Sel_Counts():
    """Read each mutant's selected-library read count from the Enrich
    .m1 counts file into Mutations[location][identity][3]."""
    #Get the selected counts
    awk = ""
    if os.path.isfile(Path+'counts_sel_example_F_N_include_filtered_B_PRO_qc.m1'):
        awk = check_output(["awk", 'FNR>1{ print $5,$6,$9 }', Path+'counts_sel_example_F_N_include_filtered_B_PRO_qc.m1'])
    elif os.path.isfile(Path+'counts_sel_example_F_N_include_filtered_R1_PRO_qc.m1'):
        awk = check_output(["awk", 'FNR>1{ print $5,$6,$9 }', Path+'counts_sel_example_F_N_include_filtered_R1_PRO_qc.m1'])
    else:
        print "Selected protein counts .m1 file not found...exit"
        quit()
    #Loop through the output
    for line in StringIO.StringIO(awk):
        split = line.split(" ")
        location = int(split[0])
        identity = str(split[1])
        counts = int(split[2].rstrip('\n'))
        #Check to see if we're above the tile length
        if location >= TileLen:
            continue
        Mutations[location][identity][3] = counts #Set the selected counts
    return Mutations
######################################################################################
#Normalize
#This normalizes the enrichments to the wild-type using the fitness metric equations
######################################################################################
def Normalize():
    """Convert each mutant's raw log2 enrichment into a normalized fitness
    (Mutations[j][aa][1]) relative to wild-type, per the selected
    normalization type (growth, FACS, Plate1, Plate2), printing one CSV
    line per matrix cell as it goes."""
    #Check to see if the wild-type enrichment is set
    if Ewt == None:
        print "Error: Wild-Type enrichment is not set...quit"
        quit()
    print ""
    print "Normalizing the data"
    print "Location,Mutation,Normalized_ER,Unselected_Reads,Selected_Reads,RawLog2"
    for j in xrange(0,TileLen):
        for i in enumerate(AA_Table):
            #Check for a case where a significant variant fell out of the population
            if Mutations[j][i[1]][0] == None and Mutations[j][i[1]][2] >= SignificantThreshold and Mutations[j][i[1]][3] == None:
                Mutations[j][i[1]][0] = log((1/Mutations[j][i[1]][2]), 2) #Calculate the raw log2 for this variant and report it as less than this value
            #Calculate the fitness
            if Mutations[j][i[1]][0] != None and Mutations[j][i[1]][2] >= SignificantThreshold: #Report the significant fitness
                Ei = float(Mutations[j][i[1]][0])
                if args.normtype == "growth":
                    Mutant = (Ei/DoublingsGp)+1
                    WT = (Ewt/DoublingsGp)+1
                    if (Mutant/WT) < 0:
                        NE = -10 #Assign an extremely negative fitness for members who are greather than -8 raw log2 enrichment
                    else:
                        NE = log(Mutant/WT, 2)
                elif args.normtype == "FACS":
                    #Inverse-error-function transform of the collection gates
                    WT = special.erfinv(1-PC*pow(2,(Ewt+1)))
                    Mutant = special.erfinv(1-PC*pow(2,(Ei+1)))
                    NE = (log(e, 2)*sqrt(2)*SD*(WT-Mutant))
                elif args.normtype == "Plate1":
                    NE = (pow(2, Ei))/(pow(2, Ewt))
                elif args.normtype == "Plate2":
                    NE = (Ei-Ewt)
                else:
                    print "Error: growth or FACS not set?"
                    quit()
                Mutations[j][i[1]][1] = "{0:.4f}".format(NE)
            elif Mutations[j][i[1]][2] < SignificantThreshold: #Report the insignificant NEs
                #NOTE(review): under Python 2 a None unselected count
                #compares < any int, so this branch also absorbs cells with
                #no unselected data, making the next elif unreachable.
                if WTSeq[j] == i[1]: #Check to see if it's wildtype else it's Not Significant
                    Mutations[j][i[1]][0] = Ewt
                    if args.normtype != "Plate1":
                        Mutations[j][i[1]][1] = "0.000"
                    else:
                        Mutations[j][i[1]][1] = "1.000"
                    Mutations[j][i[1]][2] = UCwt
                    Mutations[j][i[1]][3] = SCwt
                    Mutations[j][i[1]][5] = True #Set the WT flag
                else:
                    Mutations[j][i[1]][1] = "NS"
            elif Mutations[j][i[1]][2] == None and Mutations[j][i[1]][3] >= SignificantThreshold: #Error: Mutation with selected counts and no unselected
                Mutations[j][i[1]][1] = "Error: Sel with Zero Unsel"
            else:
                print "Error: unknown normalization problem."
            #Print out column data
            print str(j+StartResidue)+","+i[1]+","+Mutations[j][i[1]][1]+","+str(Mutations[j][i[1]][2])+","+str(Mutations[j][i[1]][3])+","+str(Mutations[j][i[1]][0])
    return Mutations
######################################################################################
#Make_CSV
#This outputs the fitness metrics to a CSV file to be imported into excel
######################################################################################
def Make_CSV():
print "Normalized Heatmap"
#This makes a CSV style report of rows of letters and columns of residues
#Print off the Number
Numbering = " "
for q in xrange(1,TileLen+1):
Numbering = Numbering+","+str(StartResidue+q)
print Numbering
#Print off the WT Residue
WTResi = " "
for w in xrange(0,TileLen):
WTResi = WTResi+","+WTSeq[w]
print WTResi
#Print off the mutations
Output = ""
for i in enumerate(AA_Table):
Output = Output+i[1]+","
for j in xrange(0,TileLen):
Output = Output+str(Mutations[j][i[1]][1])+","
Output = Output+"\n"
print Output
if args.heatmap == "True":
#Write the heatmap to a newfile
outfile = open('fitnessheatmap_'+args.heatfilename+'_'+str(StartResidue)+'.csv', 'w')
outfile.write(Numbering+'\n')
outfile.write(WTResi+'\n')
outfile.write(Output)
return
######################################################################################
#main
#This is the main function that calls the sub-functions, it also outputs the run
#information including the command line parameters
######################################################################################
def main():
    """Entry point: print the run preamble and parameters, then run the
    normalization pipeline (build matrix, read counts, compute enrichments,
    normalize, and emit the CSV heatmap).
    """
    # Ewt may be reassigned below when the manual-override flag is set.
    global Ewt
    #Write out preamble
    print "QuickNormalize"
    print "Author: "+__author__
    print "Contact: "+__email__[0]+", "+__email__[1]+", "+__email__[2]
    print __copyright__
    print "Version: "+__version__
    print "License: "+__license__
    print "Credits: "+__credits__[0]+", "+__credits__[1]+", "+__credits__[2]
    print ""
    print "Please cite:"
    print "Github [user: JKlesmith] (www.github.com)"
    print "Kowalsky CA, Klesmith JR, Stapleton JA, Kelly V, Reichkitzer N, Whitehead TA. 2015. High-Resolution Sequence-Function Mapping of Full-Length Proteins. PLoS ONE 10(3):e0118193. doi:10.1371/journal.pone.0118193."
    print "Klesmith JR, Bacik J-P, Michalczyk R, Whitehead TA. 2015. Comprehensive Sequence-Flux Mapping of a Levoglucosan Utilization Pathway in E. coli."
    print ""
    # Echo the run parameters so the console log documents the run.
    print "Normalization run parameters:"
    print time.strftime("%H:%M:%S")
    print time.strftime("%m/%d/%Y")
    print "Start residue (-s): "+args.startresidue
    print "Normalization type (-n): "+args.normtype
    if args.normtype == "growth":
        print "GROWTH: Doublings (gp) (-g): "+args.gp
    if args.normtype == "FACS":
        print "FACS: SD (-d): "+args.stddev
        print "FACS: Percent Collected (-c): "+args.percentcollected
        print "FACS: Theoretical max enrichment based off of percent collected: "+str(THEOENRICHMENT)
    print "Tile Length: "+str(TileLen)
    print "Enrich directory (-p): "+args.path
    print "Unselected counts to be significant (-t): "+str(args.sigthreshold)
    #print "Wild-type sequence file (-w): "+args.wildtype
    print "Wild-type sequence: "+WTSeq
    #Build Matrix
    Build_Matrix()
    #Get the selected counts
    Get_Unsel_Counts()
    Get_Sel_Counts()
    #Get the raw log2 data
    if OverrideEwtEi == True:
        #Set the manual Ewt enrichment instead of deriving it from the data
        Ewt = ManualEwt
        print "Manually set Ewt (-y): "+str(Ewt)
        print "Ei scalar transform (-z): "+str(EiScalar)
    else:
        Get_WT()
    Get_Mut_Ei()
    #Normalize the Data
    Normalize()
    #Print out a csv
    Make_CSV()
if __name__ == '__main__':
main() | {
"content_hash": "5e16630c5c82ab16e873a1db6511830c",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 221,
"avg_line_length": 42.54713114754098,
"alnum_prop": 0.563309733660839,
"repo_name": "JKlesmith/Deep_Sequencing_Analysis",
"id": "2d977c67c014656e0867f971fff8a6c261829510",
"size": "20910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QuickNormalize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "117924"
}
],
"symlink_target": ""
} |
"""Example implementation of two double ended sliders as extension widgets"""
from bokeh.core.properties import Float, Instance, Tuple, Bool, Enum
from bokeh.models import InputWidget
from bokeh.models.callbacks import Callback
from bokeh.core.enums import SliderCallbackPolicy
from bokeh.layouts import column
from bokeh.models import Slider, CustomJS, ColumnDataSource
from bokeh.io import show
from bokeh.plotting import Figure
class IonRangeSlider(InputWidget):
    """Custom Bokeh extension widget: a double-ended range slider backed by
    the Ion.RangeSlider jQuery plugin (loaded via ``__javascript__``/``__css__``).
    """

    # The special class attribute ``__implementation__`` should contain a string
    # of JavaScript (or CoffeeScript) code that implements the JavaScript side
    # of the custom extension model or a string name of a JavaScript (or
    # CoffeeScript) file with the implementation.
    __implementation__ = 'extensions_ion_range_slider.coffee'

    # External assets the browser must load for this widget to work.
    __javascript__ = ["https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js",
                      "https://cdnjs.cloudflare.com/ajax/libs/ion-rangeslider/2.1.4/js/ion.rangeSlider.js"]
    __css__ = ["https://cdnjs.cloudflare.com/ajax/libs/normalize/4.2.0/normalize.css",
               "https://cdnjs.cloudflare.com/ajax/libs/ion-rangeslider/2.1.4/css/ion.rangeSlider.css",
               "https://cdnjs.cloudflare.com/ajax/libs/ion-rangeslider/2.1.4/css/ion.rangeSlider.skinFlat.min.css",
               "https://cdnjs.cloudflare.com/ajax/libs/ion-rangeslider/2.1.4/img/sprite-skin-flat.png"]

    # Below are all the "properties" for this model. Bokeh properties are
    # class attributes that define the fields (and their types) that can be
    # communicated automatically between Python and the browser. Properties
    # also support type validation. More information about properties in
    # can be found here:
    #
    # https://bokeh.pydata.org/en/latest/docs/reference/core.html#bokeh-core-properties
    disable = Bool(default=True, help="""
    Enable or disable the slider.
    """)
    grid = Bool(default=True, help="""
    Show or hide the grid beneath the slider.
    """)
    start = Float(default=0, help="""
    The minimum allowable value.
    """)
    end = Float(default=1, help="""
    The maximum allowable value.
    """)
    range = Tuple(Float, Float, help="""
    The start and end values for the range.
    """)
    step = Float(default=0.1, help="""
    The step between consecutive values.
    """)
    callback = Instance(Callback, help="""
    A callback to run in the browser whenever the current Slider value changes.
    """)
    callback_throttle = Float(default=200, help="""
    Number of microseconds to pause between callback calls as the slider is moved.
    """)
    callback_policy = Enum(SliderCallbackPolicy, default="throttle", help="""
    When the callback is initiated. This parameter can take on only one of three options:
    "continuous": the callback will be executed immediately for each movement of the slider
    "throttle": the callback will be executed at most every ``callback_throttle`` milliseconds.
    "mouseup": the callback will be executed only once when the slider is released.
    The `mouseup` policy is intended for scenarios in which the callback is expensive in time.
    """)
# Demo: a line plot y = x driven by a stock Slider (power curve) and the
# custom IonRangeSlider (x-range), both via client-side JS callbacks.
xs = [n*0.005 for n in range(2, 198)]
ys = xs

source = ColumnDataSource(data=dict(x=xs, y=ys))

plot = Figure(plot_width=400, plot_height=400)
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6, color='#ed5565')

# Raise each x to the slider's power.
callback_single = CustomJS(args=dict(source=source), code="""
    var data = source.data;
    var f = cb_obj.value
    var x = data['x']
    var y = data['y']
    for (var i = 0; i < x.length; i++) {
        y[i] = Math.pow(x[i], f)
    }
    source.change.emit();
""")

# Re-spread x over the selected range, keeping the current power.
callback_ion = CustomJS(args=dict(source=source), code="""
    var data = source.data;
    var f = cb_obj.range
    var x = data['x']
    var y = data['y']
    var pow = (Math.log(y[100])/Math.log(x[100]))
    console.log(pow)
    var delta = (f[1] - f[0])/x.length
    for (var i = 0; i < x.length; i++) {
        x[i] = delta*i + f[0]
        y[i] = Math.pow(x[i], pow)
    }
    source.change.emit();
""")

slider = Slider(start=0, end=5, step=0.1, value=1, title="Bokeh Slider - Power", callback=callback_single)
ion_range_slider = IonRangeSlider(start=0.01, end=0.99, step=0.01, range=(min(xs), max(xs)),
                                  title='Ion Range Slider - Range', callback=callback_ion, callback_policy='continuous')

show(column(plot, slider, ion_range_slider))
| {
"content_hash": "3b6f7f187474138de7d9d74da5a2a3bd",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 115,
"avg_line_length": 38.9059829059829,
"alnum_prop": 0.6586115992970123,
"repo_name": "Karel-van-de-Plassche/bokeh",
"id": "4103508b63c835a71db086202d8dd62cabf93d91",
"size": "4552",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sphinx/source/docs/user_guide/examples/extensions_example_widget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705342"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468291"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import User
class UserAdmin(admin.ModelAdmin):
    """Admin configuration for the custom User model."""

    # Fields shown on the change form, in display order.
    fields = ('first_name', 'last_name', 'birthday', 'gender', 'language', 'timezone', 'is_superuser', 'is_staff', 'is_active', 'username', 'email', 'password', 'created_on', 'updated_on', 'last_login',)
    # Displayed but not editable: the password hash and bookkeeping timestamps.
    readonly_fields = ('password', 'created_on', 'updated_on', 'last_login',)
admin.site.register(User, UserAdmin)

# Hide the default Group and Site sections from the admin index.
# NOTE(review): these imports sit mid-module rather than at the top.
from django.contrib.auth.models import Group
from django.contrib.sites.models import Site
admin.site.unregister(Group)
admin.site.unregister(Site) | {
"content_hash": "917331826a528d43af43503422d42bc5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 203,
"avg_line_length": 29.842105263157894,
"alnum_prop": 0.7160493827160493,
"repo_name": "dogukantufekci/easyfind",
"id": "78baaeeeeedac2e137020e9a8cffc2142f6a029a",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easyfind/users/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "22530"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
} |
import aarhus
import custom_stopwords
| {
"content_hash": "96a2242cfdccf5c238f2e38da565529f",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 19,
"alnum_prop": 0.868421052631579,
"repo_name": "mikedelong/aarhus",
"id": "0464ae03ba08c507f2b948109b559f1a57a8ce92",
"size": "38",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aarhus/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "146893"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.io
from datasets.read_libsvm import *
import logging
import logging.config
import os
def libsvm2mat(fname, ndata, nfeatures, binary=True):
    """Convert a LibSVM-format dataset into a compressed MATLAB v5 file.

    The data matrix is stored transposed (CSC form) under key 'X' and the
    labels under key 'd' in a file named ``<fname>.mat``.

    :param fname: path of the LibSVM input file.
    :param ndata: number of data points to read.
    :param nfeatures: number of features per data point.
    :param binary: whether labels are binary (passed through to readLibSVM).
    """
    logging.basicConfig(format='%(asctime)-15s %(message)s', level=logging.DEBUG)
    logger = logging.getLogger("opt")

    logger.info("Starting data read")
    X, d = readLibSVM(fname=fname, ndata=ndata, nfeatures=nfeatures, binary=binary)

    # NOTE: a seeded random row permutation used to happen here but is
    # intentionally disabled.

    logger.info("Convert dataset to CSC, in transposed form")
    X = X.tocsr().transpose()

    logger.info("Saving full dataset ...")
    scipy.io.savemat(
        file_name="%s.mat" % fname,
        mdict={'X': X, 'd': d},
        do_compression=True,
        format='5',
        oned_as='row')
    logger.info("Done!!!")
| {
"content_hash": "45455a5e82f1504b9b6c361088fb0ba4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 25.157894736842106,
"alnum_prop": 0.6307531380753139,
"repo_name": "adefazio/point-saga",
"id": "801c2b79be5b111f8e94c6f361747f517dcf98f0",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datasets/libsvm2mat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56265"
},
{
"name": "Shell",
"bytes": "658"
}
],
"symlink_target": ""
} |
from datetime import datetime
from google.appengine.api import taskqueue
import config
import logic.alias
import logic.email
import logic.event
from errors import TaintedLove
from logic.toggle import get_toggle_state
from models import Employee
from models import Love
from models import LoveCount
from models.toggle import LOVE_SENDING_ENABLED
from util.company_values import get_hashtag_value_mapping
from util.render import render_template
def _love_query(start_dt, end_dt, include_secret):
    """Build a Love query ordered newest-first, optionally bounded in time.

    :param start_dt: earliest timestamp (inclusive); applied only if a datetime.
    :param end_dt: latest timestamp (inclusive); applied only if a datetime.
    :param include_secret: when False, secret love is filtered out.
    :returns: an (unfetched) ndb query over Love.
    """
    query = Love.query().order(-Love.timestamp)
    # isinstance (rather than an exact type(...) is check) also accepts
    # datetime subclasses, which the old check silently ignored.
    if isinstance(start_dt, datetime):
        query = query.filter(Love.timestamp >= start_dt)
    if isinstance(end_dt, datetime):
        query = query.filter(Love.timestamp <= end_dt)
    # `is False` keeps the old behavior exactly: only the literal False
    # (not merely any falsy value) hides secret love.
    if include_secret is False:
        query = query.filter(Love.secret == False)  # noqa
    return query
def _sent_love_query(employee_key, start_dt, end_dt, include_secret):
    """Query for love sent by *employee_key*, newest first."""
    base_query = _love_query(start_dt, end_dt, include_secret)
    return base_query.filter(Love.sender_key == employee_key)
def _received_love_query(employee_key, start_dt, end_dt, include_secret):
    """Query for love received by *employee_key*, newest first."""
    base_query = _love_query(start_dt, end_dt, include_secret)
    return base_query.filter(Love.recipient_key == employee_key)
def recent_sent_love(employee_key, start_dt=None, end_dt=None, include_secret=True, limit=None):
    """Async-fetch love recently sent by *employee_key* (newest first).

    A limit is applied only when it is exactly an int.
    """
    query = _sent_love_query(employee_key, start_dt, end_dt, include_secret)
    if type(limit) is int:
        return query.fetch_async(limit)
    return query.fetch_async()
def recent_received_love(employee_key, start_dt=None, end_dt=None, include_secret=True, limit=None):
    """Async-fetch love recently received by *employee_key* (newest first).

    A limit is applied only when it is exactly an int.
    """
    query = _received_love_query(employee_key, start_dt, end_dt, include_secret)
    if type(limit) is int:
        return query.fetch_async(limit)
    return query.fetch_async()
def _love_query_by_company_value(employee_key, company_value, start_dt, end_dt, include_secret):
    # Query for love tagged with the given company value, newest first.
    # NOTE(review): employee_key is accepted but never used -- the query is
    # NOT scoped to a single employee. Confirm whether that is intentional.
    return _love_query(start_dt, end_dt, include_secret).filter(Love.company_values == company_value)
def _love_query_with_any_company_value(employee_key, start_dt, end_dt, include_secret):
    # Query for love tagged with at least one configured company value.
    # NOTE(review): employee_key is accepted but never used -- the query is
    # NOT scoped to a single employee. Confirm whether that is intentional.
    company_values = [value.id for value in config.COMPANY_VALUES]
    return _love_query(start_dt, end_dt, include_secret).filter(Love.company_values.IN(company_values))
def recent_loves_by_company_value(employee_key, company_value, start_dt=None, end_dt=None,
                                  include_secret=False, limit=None):
    """Async-fetch recent love tagged with *company_value* (newest first).

    A limit is applied only when it is exactly an int.
    """
    query = _love_query_by_company_value(employee_key, company_value, start_dt, end_dt, include_secret)
    if type(limit) is int:
        return query.fetch_async(limit)
    return query.fetch_async()
def recent_loves_with_any_company_value(employee_key, start_dt=None, end_dt=None,
                                        include_secret=False, limit=None):
    """Async-fetch recent love tagged with any company value (newest first).

    A limit is applied only when it is exactly an int.
    """
    query = _love_query_with_any_company_value(employee_key, start_dt, end_dt, include_secret)
    if type(limit) is int:
        return query.fetch_async(limit)
    return query.fetch_async()
def send_love_email(l): # noqa
    """Send an email notifying the recipient of l about their love.

    The email body also includes up to three of the recipient's other
    recent loves (excluding l itself).
    """
    # Kick off both entity fetches concurrently; results are collected below.
    sender_future = l.sender_key.get_async()
    recipient_future = l.recipient_key.get_async()

    # Remove this love from recent_love if present (datastore is funny sometimes)
    recent_love = recent_received_love(l.recipient_key, limit=4).get_result()
    index_to_remove = None
    for i, love in enumerate(recent_love):
        if l.sender_key == love.sender_key and l.recipient_key == love.recipient_key and l.message == love.message:
            index_to_remove = i
            break
    if index_to_remove is not None:
        del recent_love[index_to_remove]

    sender = sender_future.get_result()
    recipient = recipient_future.get_result()

    from_ = config.LOVE_SENDER_EMAIL
    to = recipient.user.email()
    subject = u'Love from {}'.format(sender.full_name)
    body_text = u'"{}"\n\n{}'.format(
        l.message,
        '(Sent secretly)' if l.secret else ''
    )
    body_html = render_template(
        'email.html',
        love=l,
        sender=sender,
        recipient=recipient,
        recent_love_and_lovers=[(love, love.sender_key.get()) for love in recent_love[:3]]
    )
    logic.email.send_email(from_, to, subject, body_html, body_text)
def get_love(sender_username=None, recipient_username=None, limit=None):
    """Get all love from a particular sender or to a particular recipient.

    :param sender_username: If present, only return love sent from a particular user.
    :param recipient_username: If present, only return love sent to a particular user.
    :param limit: If present, only return this many items.
    :raises TaintedLove: if neither user is given, or sender == recipient.
    :returns: an async future of non-secret Love entities, newest first.
    """
    sender_username = logic.alias.name_for_alias(sender_username)
    recipient_username = logic.alias.name_for_alias(recipient_username)

    # Guard clauses: require at least one user, and forbid self-love queries.
    if not (sender_username or recipient_username):
        raise TaintedLove('Not gonna give you all the love in the world. Sorry.')
    if sender_username == recipient_username:
        raise TaintedLove('Who sends love to themselves? Honestly?')

    query = (
        Love.query()
        .filter(Love.secret == False)  # noqa
        .order(-Love.timestamp)
    )
    if sender_username:
        query = query.filter(
            Love.sender_key == Employee.get_key_for_username(sender_username))
    if recipient_username:
        query = query.filter(
            Love.recipient_key == Employee.get_key_for_username(recipient_username))

    return query.fetch_async(limit) if limit else query.fetch_async()
def send_loves(recipients, message, sender_username=None, secret=False):
    """Validate the sender and recipients, then send love to each recipient.

    :param recipients: iterable of recipient usernames (aliases allowed).
    :param message: the love message text.
    :param sender_username: sender's username; defaults to the current employee.
    :param secret: when True, no public event is recorded for the love.
    :returns: the set of resolved recipient usernames that were actually loved.
    :raises TaintedLove: if sending is disabled, the sender is invalid, a
        recipient appears twice/is invalid, or the sender is the only recipient.
    """
    if get_toggle_state(LOVE_SENDING_ENABLED) is False:
        raise TaintedLove('Sorry, sending love is temporarily disabled. Please try again in a few minutes.')

    recipient_keys, unique_recipients = validate_love_recipients(recipients)

    if sender_username is None:
        sender_username = Employee.get_current_employee().username
    # Resolve any alias to the canonical username before the lookup.
    sender_username = logic.alias.name_for_alias(sender_username)
    sender_key = Employee.query(
        Employee.username == sender_username,
        Employee.terminated == False, # noqa
    ).get(keys_only=True) # noqa
    if sender_key is None:
        raise TaintedLove(u'Sorry, {} is not a valid user.'.format(sender_username))

    # Self-love is silently dropped from the recipient set.
    # Only raise an error if the only recipient is the sender.
    if sender_key in recipient_keys:
        recipient_keys.remove(sender_key)
        unique_recipients.remove(sender_username)
        if len(recipient_keys) == 0:
            raise TaintedLove(u'You can love yourself, but not on {}!'.format(
                config.APP_NAME
            ))

    for recipient_key in recipient_keys:
        _send_love(recipient_key, message, sender_key, secret)

    return unique_recipients
def validate_love_recipients(recipients):
    """Resolve recipient aliases and look up their Employee keys.

    :param recipients: iterable of recipient usernames (aliases allowed).
    :returns: tuple of (list of Employee keys, set of resolved usernames).
    :raises TaintedLove: if a user is listed twice (after alias resolution)
        or any username does not match an active employee.
    """
    unique_recipients = {logic.alias.name_for_alias(name) for name in recipients}
    if len(recipients) != len(unique_recipients):
        raise TaintedLove(u'Sorry, you are trying to send love to a user multiple times.')

    # Validate every recipient before carrying out any Love transactions.
    recipient_keys = []
    for username in unique_recipients:
        employee_key = Employee.query(
            Employee.username == username,
            Employee.terminated == False  # noqa
        ).get(keys_only=True)  # noqa
        if employee_key is None:
            raise TaintedLove(u'Sorry, {} is not a valid user.'.format(username))
        recipient_keys.append(employee_key)

    return recipient_keys, unique_recipients
def _send_love(recipient_key, message, sender_key, secret):
    """Send love and do associated bookkeeping.

    Persists the Love entity, updates the LoveCount aggregate, queues the
    notification email task, and (for non-secret love) records a LOVESENT event.
    """
    new_love = Love(
        sender_key=sender_key,
        recipient_key=recipient_key,
        message=message,
        secret=(secret is True),
    )
    # Tag the love with any company values whose hashtags appear in the message.
    new_love.company_values = _get_company_values(new_love, message)
    new_love.put()
    LoveCount.update(new_love)

    # Send email asynchronously
    taskqueue.add(
        url='/tasks/love/email',
        params={
            'id': new_love.key.id()
        }
    )

    if not secret:
        logic.event.add_event(
            logic.event.LOVESENT,
            {'love_id': new_love.key.id()},
        )
def _get_company_values(new_love, message):
    """Return the company values whose hashtags appear in *message*.

    Matching is a case-insensitive substring search for each configured
    hashtag.

    NOTE(review): *new_love* is accepted but unused; kept for interface
    compatibility -- confirm whether any caller relies on it.

    :param new_love: the Love entity being tagged (currently unused).
    :param message: the love message text to scan.
    :returns: list of matched company value identifiers (unordered).
    """
    # Hoisted out of the loop: lowercase the message once, not per hashtag.
    lowered_message = message.lower()
    matched_categories = set()
    for hashtag, category in get_hashtag_value_mapping().iteritems():
        if hashtag in lowered_message:
            matched_categories.add(category)
    return list(matched_categories)
| {
"content_hash": "36eb947853ee06f8544ea28a3aa938c0",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 115,
"avg_line_length": 36.634453781512605,
"alnum_prop": 0.6765684138089231,
"repo_name": "Yelp/love",
"id": "e57d2ad01319f87b49a4a91134cc3fc9c7744fe0",
"size": "8743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logic/love.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5902"
},
{
"name": "HTML",
"bytes": "43114"
},
{
"name": "JavaScript",
"bytes": "836"
},
{
"name": "Makefile",
"bytes": "1241"
},
{
"name": "Python",
"bytes": "118945"
}
],
"symlink_target": ""
} |
"""
Experimental optimization
This module provides a single mixin class which allows protocols to
collapse numerous small writes into a single larger one.
@author: Jp Calderone
"""
from twisted.internet import reactor
class BufferingMixin:
    """Mixin which adds write buffering.

    Chunks passed to C{write} are collected until C{DELAY} seconds elapse
    without a new write, then sent to the transport in one
    C{writeSequence} call.
    """

    # Pending delayed call for the scheduled flush, or None when idle.
    _delayedWriteCall = None

    # Buffered chunks awaiting the next flush, or None when idle.
    data = None

    # Seconds to wait after the most recent write before flushing.
    DELAY = 0.0

    def schedule(self):
        """Schedule a flush after C{self.DELAY} seconds; return the timer token."""
        return reactor.callLater(self.DELAY, self.flush)

    def reschedule(self, token):
        """Push an already-scheduled flush back by C{self.DELAY} seconds."""
        token.reset(self.DELAY)

    def write(self, data):
        """
        Buffer some bytes to be written soon.

        Every call to this function delays the real write by C{self.DELAY}
        seconds.  When the delay expires, all collected bytes are written
        to the underlying transport using L{ITransport.writeSequence}.
        """
        if self._delayedWriteCall is not None:
            self.reschedule(self._delayedWriteCall)
        else:
            self.data = []
            self._delayedWriteCall = self.schedule()
        self.data.append(data)

    def flush(self):
        """Flush the buffer immediately."""
        self._delayedWriteCall = None
        self.transport.writeSequence(self.data)
        self.data = None
| {
"content_hash": "d0bcd6ea5fffca67943eedcf73e1cef5",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 74,
"avg_line_length": 25.122448979591837,
"alnum_prop": 0.6425670186839968,
"repo_name": "Tokyo-Buffalo/tokyosouth",
"id": "99439cf1475e906fe465155021e48f87fd3099d2",
"size": "1360",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/twisted/conch/mixin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "145891"
},
{
"name": "Python",
"bytes": "7587"
}
],
"symlink_target": ""
} |
import csv
import json
from datetime import datetime
"""Useful for converting DynamoDB exports to CSV"""
data = []
with open('data') as f:
for line in f:
data.append(json.loads(line))
with open('data.csv', 'w', encoding='utf8') as csvf:
w = csv.writer(csvf, quoting=csv.QUOTE_MINIMAL)
w.writerow(['id', 'created_at', 'user_name', 'url'])
for idx, entry in enumerate(data):
created_at = datetime.fromtimestamp(
int(entry['createdAt']['n'])/1000.0)
user_name = entry['userName']['s']
url = entry['url']['s']
w.writerow([idx+1, created_at, user_name, url])
| {
"content_hash": "8a8fce368191214d91727938ab2e3612",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 56,
"avg_line_length": 29.80952380952381,
"alnum_prop": 0.6118210862619808,
"repo_name": "akrawchyk/amweekly",
"id": "f7562fbb0fd40e6f732b13273da04f190ed5f4bf",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/dynamodb_to_csv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "84"
},
{
"name": "HTML",
"bytes": "3168"
},
{
"name": "JavaScript",
"bytes": "4295"
},
{
"name": "Python",
"bytes": "45027"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
} |
import hashlib
from anchore_engine.db import AnalysisArtifact
from anchore_engine.services.policy_engine.engine.policy.gate import BaseTrigger, Gate
from anchore_engine.utils import ensure_bytes, ensure_str
MALWARE_CONTEXT_KEY = "malware_scans"
class ScanFindingsTrigger(BaseTrigger):
    __trigger_name__ = "scans"
    __description__ = (
        "Triggers if any malware scanner has found any matches in the image."
    )

    @staticmethod
    def _trigger_id(scanner, file, signature):
        """Build the trigger id for one finding.

        The id combines the scanner name, the signature, and an md5 digest of
        the file path (hashed to keep the id length within reasonable bounds).
        """
        path_digest = hashlib.new(
            "md5", ensure_bytes(file), usedforsecurity=False
        ).hexdigest()
        return "{}+{}+{}".format(scanner, signature, ensure_str(path_digest))

    def evaluate(self, image_obj, context):
        """Fire once for each (scanner, path, signature) finding in the context."""
        try:
            scan_results = context.data[MALWARE_CONTEXT_KEY]
        except KeyError:
            scan_results = {}

        for scanner_id, result in scan_results.items():
            scanner_name = result.get("scanner", scanner_id)
            for finding in result.get("findings", []):
                sig = finding.get("signature")
                file_path = finding.get("path")
                # Skip malformed findings defensively.
                if not (sig and file_path):
                    continue
                self._fire(
                    instance_id=self._trigger_id(scanner_name, file_path, sig),
                    msg="Malware scan finding: scanner={} file={} signature={}".format(
                        scanner_name, file_path, sig
                    ),
                )
class ScanNotRunTrigger(BaseTrigger):
    __trigger_name__ = "scan_not_run"
    __description__ = "Triggers if no malware scan has been run on the image."

    def evaluate(self, image_obj, context):
        """Fire if the context carries no malware scan results at all."""
        try:
            scan_results = context.data.get(MALWARE_CONTEXT_KEY)
        except AttributeError:
            # context.data may be missing/None; treat that as "no scans".
            scan_results = None

        if not scan_results:
            self._fire(msg="No malware scans found for image")
class MalwareGate(Gate):
    __gate_name__ = "malware"
    __description__ = "Checks for malware scan findings in the image"
    __triggers__ = [ScanFindingsTrigger, ScanNotRunTrigger]

    def prepare_context(self, image_obj, context):
        """Load malware analysis artifacts into the evaluation context.

        The artifacts are loaded from the db once and cached in the context
        under MALWARE_CONTEXT_KEY so the triggers avoid repeated loads; this
        is an optimization and could be removed.

        :param image_obj: image whose analysis artifacts are queried.
        :param context: evaluation context to populate.
        :returns: the same context, with scan results keyed by artifact key.
        """
        malware_artifacts = image_obj.analysis_artifacts.filter(
            AnalysisArtifact.analyzer_id == "malware",
            AnalysisArtifact.analyzer_artifact == "malware",
            AnalysisArtifact.analyzer_type == "base",
        ).all()

        context.data[MALWARE_CONTEXT_KEY] = {
            artifact.artifact_key: artifact.json_value
            for artifact in malware_artifacts
        }
        return context
| {
"content_hash": "5a49899810be9e09944930cf2939d929",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 139,
"avg_line_length": 32.22,
"alnum_prop": 0.574487895716946,
"repo_name": "anchore/anchore-engine",
"id": "19179851ad6f82779b0a59c47d3ba2f0fdf20ae7",
"size": "3222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anchore_engine/services/policy_engine/engine/policy/gates/malware.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
import functools
import logging
from os.path import dirname, join
from . import cloudmask # noqa
from . import config # noqa
from . import constants # noqa
from . import files # noqa
from . import geodesy # noqa
from . import geographical # noqa
from . import latex # noqa
from . import math # noqa
from . import nonlte # noqa
from . import physics # noqa
from . import plots # noqa
from . import spectroscopy # noqa
from . import topography # noqa
from . import trees # noqa
from . import utils # noqa
from .environment import environ # noqa
# Parse version number from module-level ASCII file.
# BUGFIX: use a context manager so the file handle is closed promptly
# instead of leaking until garbage collection.
with open(join(dirname(__file__), "VERSION")) as _version_file:
    __version__ = _version_file.read().strip()
def test():
    """Use pytest to collect and run all tests in typhon.tests.

    Returns the pytest exit status (0 when all tests pass).
    """
    # Imported lazily so pytest is only required when running the test suite.
    import pytest

    return pytest.main(["--pyargs", "typhon.tests"])
_logger = logging.getLogger(__name__)
@functools.lru_cache()
def _ensure_handler(handler=None, formatter=None):
    """Make sure that a handler is attached to the package logger.

    The LRU cache ensures that a new handler is only created during the
    first call of the function. From then on, this handler is reused.

    :param handler: ``logging.Handler`` to attach; defaults to a new
        ``StreamHandler``.
    :param formatter: ``logging.Formatter`` to apply; defaults to
        ``logging.BASIC_FORMAT``.
    :returns: the attached handler.
    """
    if handler is None:
        handler = logging.StreamHandler()

    if formatter is None:
        formatter = logging.Formatter(logging.BASIC_FORMAT)

    handler.setFormatter(formatter)
    # Attach to the module-level package logger so set_loglevel() controls it.
    _logger.addHandler(handler)

    return handler
def set_loglevel(level, handler=None, formatter=None):
    """Set the loglevel of the package.

    Parameters:
        level (int): Loglevel according to the ``logging`` module.
        handler (``logging.Handler``): Logging handler.
        formatter (``logging.Formatter``): Logging formatter.
    """
    _logger.setLevel(level)
    # The handler is created and attached once (lru_cache), then reused.
    _ensure_handler(handler, formatter).setLevel(level)
def set_fancy_logging(level=None):
    """Create a basic logging config with colorful output format.

    :param level: loglevel per the ``logging`` module; defaults to INFO.
    """
    ansi_bold = "\033[1;%dm"
    reset = "\033[0m"
    # ANSI bold foreground colors 30..37.
    black, red, green, yellow, blue, magenta, cyan, white = [
        ansi_bold % (30 + i) for i in range(8)
    ]

    logformat = (
        "["
        + magenta + "%(levelname)s" + reset + ":"
        + red + "%(asctime)s.%(msecs)03d" + reset + ":"
        + yellow + "%(filename)s" + reset
        + ":" + blue + "%(lineno)s" + reset
        + ":" + green + "%(funcName)s" + reset
        + "] %(message)s"
    )

    logging.basicConfig(
        format=logformat,
        level=logging.INFO if level is None else level,
        datefmt="%H:%M:%S",
    )
| {
"content_hash": "ab9301bba1c84c999e266d2b5d42a592",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 71,
"avg_line_length": 28.125,
"alnum_prop": 0.6496969696969697,
"repo_name": "atmtools/typhon",
"id": "74560a6b5d5b2781926eda4e95bce37954da2aa6",
"size": "2475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "typhon/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1368660"
},
{
"name": "Shell",
"bytes": "203"
},
{
"name": "TeX",
"bytes": "315"
}
],
"symlink_target": ""
} |
"""
Made with <3 by Mustafa Gezen (mustafagezen)
The MIT License (MIT)
Copyright (c) 2014 Mustafa Gezen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import requests, json
from struct import Struct
class Bring(object):
    """Client for Bring's (Norwegian postal service) public JSON APIs.

    Each method issues an HTTP GET against a Bring endpoint and returns the
    extracted fields wrapped in ``Struct`` for attribute-style access.

    NOTE(review): ``Struct`` comes from ``from struct import Struct`` at
    module level; the stdlib ``struct.Struct`` expects a format string, not
    a dict, so this presumably resolves to a project-local ``struct``
    module -- confirm.
    """

    BRING_TRACKING_API = "http://sporing.bring.no/sporing.json?q=%s"
    BRING_LOCATION_API = "http://fraktguide.bring.no/fraktguide/api/postalCode.json?country=%s&pnr=%s"
    BRING_POSTAL_API = "http://fraktguide.bring.no/fraktguide/api/pickuppoint/postalcode/%s.json"
    BRING_POSTAL_GEO_API = "http://fraktguide.bring.no/fraktguide/api/pickuppoint/location/%s/%s.json"

    def tracking(self, tr_nr):
        """Returns tracking data
        DATA(WEIGHT_KG, STATUS, CITY, DESCRIPTION, COUNTRY, COUNTRY_CODE, UNIT_TYPE, POSTAL_CODE, SIGNATURE, RAW, RAW_STRING)
        @param :tr_nr Tracking number
        """
        FINAL_URI = self.BRING_TRACKING_API % tr_nr
        REQUEST = requests.get(FINAL_URI)
        JSON = json.loads(REQUEST.text)["consignmentSet"][0]
        PACKAGE_SET = JSON["packageSet"][0]
        EVENT_SET = PACKAGE_SET["eventSet"][0]

        DATA = {
            'WEIGHT_KG': JSON["totalWeightInKgs"],
            'STATUS': EVENT_SET["status"],
            'CITY': EVENT_SET["city"],
            'DESCRIPTION': EVENT_SET["description"],
            'COUNTRY': EVENT_SET["country"],
            'COUNTRY_CODE': EVENT_SET["countryCode"],
            'UNIT_TYPE': EVENT_SET["unitType"],
            'POSTAL_CODE': EVENT_SET["postalCode"],
            'SIGNATURE': EVENT_SET["recipientSignature"]["name"],
            'RAW': json.loads(REQUEST.text),
            'RAW_STRING': str(json.loads(REQUEST.text))
        }

        # Normalize falsy values (empty strings etc.) to None.
        for key in DATA:
            if not DATA[key]:
                DATA[key] = None

        return Struct(DATA)

    def postal(self, country, postal):
        """Returns postal information
        DATA(RESULT, POSTAL_TYPE, RAW, RAW_STRING)
        @param :country Country Code
        @param :postal Postal code
        """
        FINAL_URI = self.BRING_LOCATION_API % (country, postal)
        REQUEST = requests.get(FINAL_URI)
        JSON = json.loads(REQUEST.text)

        DATA = {
            'RESULT': None,
            'POSTAL_TYPE': None,
            'RAW': None,
            'RAW_STRING': None
        }

        if JSON["valid"] == False:
            # Invalid postal code: leave every field as None.
            pass
        else:
            DATA["RESULT"] = JSON["result"]
            DATA["POSTAL_TYPE"] = JSON["postalCodeType"]
            DATA["RAW"] = json.loads(REQUEST.text)
            # BUGFIX: was DATA["RAW_string"], which left the documented
            # RAW_STRING field as None and created a stray lowercase key.
            DATA["RAW_STRING"] = str(json.loads(REQUEST.text))

        return Struct(DATA)

    def pickup(self, postal=None, geo=None):
        """Returns the nearest pickup point
        DATA(NAME, ADDRESS, POSTAL, CITY, COUNTRY_CODE, COUNTY, VISITING_ADDRESS, VISITING_POSTAL, LOCATION_DESCRIPTION, OPENING_HOURS_NO, OPENING_HOURS_EN, RAW, RAW_STRING)
        @param(optional) :postal Postal code
        @param(optional) :geo Geographic location in array, ex. [59.7263078,10.2250463]
        @required :postal or :geo
        """
        DATA = {
            'NAME': None,
            'ADDRESS': None,
            'POSTAL': None,
            'CITY': None,
            'COUNTRY_CODE': None,
            'COUNTY': None,
            'VISITING_ADDRESS': None,
            'VISITING_POSTAL': None,
            'LOCATION_DESCRIPTION': None,
            'OPENING_HOURS_NO': None,
            'OPENING_HOURS_EN': None,
            'RAW': None,
            'RAW_STRING': None
        }

        # Exactly one of postal / geo must be supplied.
        if postal is not None and geo is not None:
            raise Exception("You cannot provide both postal code and geographic locations!")
        elif postal is not None:
            FINAL_URI = self.BRING_POSTAL_API % postal
        elif geo is not None:
            FINAL_URI = self.BRING_POSTAL_GEO_API % (geo[0], geo[1])
        else:
            raise Exception("Unexpected error")

        REQUEST = requests.get(FINAL_URI)
        JSON = json.loads(REQUEST.text)["pickupPoint"][0]

        DATA["NAME"] = JSON["name"]
        DATA["ADDRESS"] = JSON["address"]
        DATA["POSTAL"] = JSON["postalCode"]
        DATA["CITY"] = JSON["city"]
        DATA["COUNTRY_CODE"] = JSON["countryCode"]
        DATA["COUNTY"] = JSON["county"]
        DATA["VISITING_ADDRESS"] = JSON["visitingAddress"]
        DATA["VISITING_POSTAL"] = JSON["visitingPostalCode"]
        DATA["LOCATION_DESCRIPTION"] = JSON["locationDescription"]
        DATA["OPENING_HOURS_NO"] = JSON["openingHoursNorwegian"]
        DATA["OPENING_HOURS_EN"] = JSON["openingHoursEnglish"]
        DATA["RAW"] = JSON
        # BUGFIX: was DATA["RAW_string"] (same key-casing typo as postal()).
        DATA["RAW_STRING"] = str(JSON)

        return Struct(DATA)
| {
"content_hash": "205a62becdeb0373ff323a769078b218",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 169,
"avg_line_length": 34.74324324324324,
"alnum_prop": 0.6705562038117464,
"repo_name": "mustafagezen/python-bring",
"id": "c855247dbf44711cddb95332a4cfc1b61b6ba7e9",
"size": "5142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/bring.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6335"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import pandas as pd
from astropy import units as u
import pytest
import tardis
from tardis.atomic import AtomData
from tardis.plasma.standard_plasmas import LegacyPlasmaArray
from tardis.plasma.properties import *
# INPUTS
@pytest.fixture
def atomic_data(selected_atoms):
    """Chianti He atomic database, prepared for the selected atoms."""
    db_path = os.path.join(tardis.__path__[0], 'tests', 'data',
                           'chianti_he_db.h5')
    data = AtomData.from_hdf5(db_path)
    data.prepare_atom_data(selected_atoms)
    return data

@pytest.fixture
def number_of_cells():
    """Cell count shared by the array-valued fixtures below."""
    return 20

@pytest.fixture
def abundance(number_of_cells):
    """Abundance table: 1.0 for atomic number 2 in every cell."""
    return pd.DataFrame(data=1.0, index=[2],
                        columns=range(number_of_cells), dtype=np.float64)

@pytest.fixture
def density(number_of_cells):
    """Uniform density of 1e-14 per cell."""
    return np.full(number_of_cells, 1e-14)

@pytest.fixture
def w(number_of_cells):
    """Uniform dilution factor of 0.5 per cell."""
    return np.full(number_of_cells, 0.5)

@pytest.fixture
def time_explosion():
    """19 days, converted to seconds."""
    return (19 * u.day).to(u.s).value

@pytest.fixture
def t_rad(number_of_cells):
    """Uniform radiative temperature of 10000 per cell."""
    return np.full(number_of_cells, 10000.0)

@pytest.fixture
def j_blues(lines):
    """Constant 1e-5 value per line over 20 cells."""
    return pd.DataFrame(1.e-5, index=lines.index, columns=range(20))

@pytest.fixture
def link_t_rad_t_electron():
    """Scalar linking t_rad and t_electrons."""
    return 0.9
# GENERAL PROPERTIES
@pytest.fixture
def selected_atoms(abundance):
    """Output of SelectedAtoms.calculate for the abundance table."""
    return SelectedAtoms(None).calculate(abundance)

@pytest.fixture
def beta_rad(t_rad):
    """Output of BetaRadiation.calculate for t_rad."""
    return BetaRadiation(None).calculate(t_rad)

@pytest.fixture
def g_electron(beta_rad):
    """Output of GElectron.calculate for beta_rad."""
    return GElectron(None).calculate(beta_rad)

@pytest.fixture
def number_density(atomic_mass, abundance, density):
    """Output of NumberDensity.calculate."""
    return NumberDensity(None).calculate(atomic_mass, abundance, density)

@pytest.fixture
def t_electrons(t_rad, link_t_rad_t_electron):
    """Output of ElectronTemperature.calculate."""
    return ElectronTemperature(None).calculate(t_rad, link_t_rad_t_electron)

@pytest.fixture
def beta_electron(t_electrons):
    """Output of BetaElectron.calculate for t_electrons."""
    return BetaElectron(None).calculate(t_electrons)
# ATOMIC PROPERTIES
# Levels.calculate and Lines.calculate each return several outputs; the
# fixtures below index into those tuples.
@pytest.fixture
def levels(atomic_data, selected_atoms):
    """First output of Levels.calculate."""
    return Levels(None).calculate(atomic_data, selected_atoms)[0]

@pytest.fixture
def excitation_energy(atomic_data, selected_atoms):
    """Second output of Levels.calculate."""
    return Levels(None).calculate(atomic_data, selected_atoms)[1]

@pytest.fixture
def metastability(atomic_data, selected_atoms):
    """Third output of Levels.calculate."""
    return Levels(None).calculate(atomic_data, selected_atoms)[2]

@pytest.fixture
def g(atomic_data, selected_atoms):
    """Fourth output of Levels.calculate."""
    return Levels(None).calculate(atomic_data, selected_atoms)[3]

@pytest.fixture
def lines(atomic_data, selected_atoms):
    """First output of Lines.calculate."""
    return Lines(None).calculate(atomic_data, selected_atoms)[0]

@pytest.fixture
def nu(atomic_data, selected_atoms):
    """Second output of Lines.calculate."""
    return Lines(None).calculate(atomic_data, selected_atoms)[1]

@pytest.fixture
def f_lu(atomic_data, selected_atoms):
    """Third output of Lines.calculate."""
    return Lines(None).calculate(atomic_data, selected_atoms)[2]

@pytest.fixture
def wavelength_cm(atomic_data, selected_atoms):
    """Fourth output of Lines.calculate."""
    return Lines(None).calculate(atomic_data, selected_atoms)[3]

@pytest.fixture
def ionization_data(atomic_data, selected_atoms):
    """Output of IonizationData.calculate."""
    return IonizationData(None).calculate(atomic_data, selected_atoms)

@pytest.fixture
def atomic_mass(atomic_data, selected_atoms):
    """Output of AtomicMass.calculate."""
    return AtomicMass(None).calculate(atomic_data, selected_atoms)

@pytest.fixture
def zeta_data(atomic_data, selected_atoms):
    """Output of ZetaData.calculate."""
    return ZetaData(None).calculate(atomic_data, selected_atoms)

@pytest.fixture
def lines_upper_level_index(lines, levels):
    """Output of LinesUpperLevelIndex.calculate (note: levels first)."""
    return LinesUpperLevelIndex(None).calculate(levels, lines)

@pytest.fixture
def lines_lower_level_index(lines, levels):
    """Output of LinesLowerLevelIndex.calculate (note: levels first)."""
    return LinesLowerLevelIndex(None).calculate(levels, lines)

@pytest.fixture
def chi_0(atomic_data):
    """Output of Chi0.calculate."""
    return Chi0(None).calculate(atomic_data)
# PARTITION FUNCTION PROPERTIES
@pytest.fixture
def level_boltzmann_factor_lte(excitation_energy, g, beta_rad, levels):
    """Output of LevelBoltzmannFactorLTE.calculate."""
    return LevelBoltzmannFactorLTE(None).calculate(
        excitation_energy, g, beta_rad, levels)

@pytest.fixture
def level_boltzmann_factor_dilute_lte(levels, g, excitation_energy, beta_rad,
                                      w, metastability):
    """Output of LevelBoltzmannFactorDiluteLTE.calculate."""
    return LevelBoltzmannFactorDiluteLTE(None).calculate(
        levels, g, excitation_energy, beta_rad, w, metastability)

@pytest.fixture
def partition_function(level_boltzmann_factor_lte):
    """Output of PartitionFunction.calculate (LTE factors)."""
    return PartitionFunction(None).calculate(level_boltzmann_factor_lte)
# ION POPULATION PROPERTIES
@pytest.fixture
def phi_saha_lte(g_electron, beta_rad, partition_function, ionization_data):
    """Output of PhiSahaLTE.calculate."""
    return PhiSahaLTE(None).calculate(
        g_electron, beta_rad, partition_function, ionization_data)

@pytest.fixture
def phi_saha_nebular(t_rad, w, zeta_data, t_electrons, delta,
                     g_electron, beta_rad, partition_function, ionization_data):
    """Output of PhiSahaNebular.calculate."""
    return PhiSahaNebular(None).calculate(
        t_rad, w, zeta_data, t_electrons, delta, g_electron, beta_rad,
        partition_function, ionization_data)

@pytest.fixture
def ion_number_density(phi_saha_lte, partition_function, number_density):
    """First output of IonNumberDensity.calculate (the second is the
    electron densities, exposed by the fixture below)."""
    return IonNumberDensity(None).calculate(
        phi_saha_lte, partition_function, number_density)[0]

@pytest.fixture
def electron_densities(phi_saha_lte, partition_function, number_density):
    """Second output of IonNumberDensity.calculate."""
    return IonNumberDensity(None).calculate(
        phi_saha_lte, partition_function, number_density)[1]

@pytest.fixture
def delta(w, ionization_data, beta_rad, t_electrons, t_rad, beta_electron,
          levels):
    """Output of RadiationFieldCorrection.calculate.

    The `levels` fixture is requested but not forwarded to calculate();
    kept for compatibility with the original dependency graph.
    """
    return RadiationFieldCorrection(chi_0_species=(2, 1)).calculate(
        w, ionization_data, beta_rad, t_electrons, t_rad, beta_electron)
# LEVEL POPULATION PROPERTIES
@pytest.fixture
def level_number_density(level_boltzmann_factor_lte, ion_number_density,
                         levels, partition_function):
    """Output of LevelNumberDensity.calculate."""
    return LevelNumberDensity(None).calculate(
        level_boltzmann_factor_lte, ion_number_density, levels,
        partition_function)
# RADIATIVE PROPERTIES
@pytest.fixture
def stimulated_emission_factor(g, level_number_density,
                               lines_lower_level_index,
                               lines_upper_level_index, metastability, lines):
    """Output of StimulatedEmissionFactor.calculate (no NLTE species)."""
    return StimulatedEmissionFactor(nlte_species=None).calculate(
        g, level_number_density, lines_lower_level_index,
        lines_upper_level_index, metastability, lines)

@pytest.fixture
def tau_sobolev(lines, level_number_density, lines_lower_level_index,
                time_explosion, stimulated_emission_factor, j_blues,
                f_lu, wavelength_cm):
    """Output of TauSobolev.calculate."""
    return TauSobolev(None).calculate(
        lines, level_number_density, lines_lower_level_index, time_explosion,
        stimulated_emission_factor, j_blues, f_lu, wavelength_cm)

@pytest.fixture
def beta_sobolev(tau_sobolev):
    """Output of BetaSobolev.calculate."""
    return BetaSobolev(None).calculate(tau_sobolev)

@pytest.fixture
def transition_probabilities(atomic_data, beta_sobolev, j_blues,
                             stimulated_emission_factor, tau_sobolev):
    """Output of TransitionProbabilities.calculate."""
    return TransitionProbabilities(None).calculate(
        atomic_data, beta_sobolev, j_blues, stimulated_emission_factor,
        tau_sobolev)
| {
"content_hash": "039060a120c721e7988461538ca3dad0",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 80,
"avg_line_length": 34.01149425287356,
"alnum_prop": 0.7122901881266194,
"repo_name": "orbitfold/tardis",
"id": "8e11b06a99965f82cb7223f4a70c3a7fd24963cc",
"size": "8877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tardis/plasma/tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "124807"
},
{
"name": "C++",
"bytes": "8267"
},
{
"name": "Python",
"bytes": "516768"
},
{
"name": "Shell",
"bytes": "459"
}
],
"symlink_target": ""
} |
def _extract_match(toc, index, seperator=','):
"""
Extracts a path between seperators (,)
:toc (str) Table of contents
:index (int) Index of match
returns full path from match
"""
length = len(toc)
start_index = index
while toc[start_index] != seperator and start_index >= 0:
start_index -= 1
end_index = index
while toc[end_index] != seperator and end_index < length - 1:
end_index += 1
if end_index == length - 1:
end_index += 1
match = toc[start_index+1:end_index].replace(seperator, '')
return match
| {
"content_hash": "6aa3f7aed0e48c942a24316a146bde1c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 65,
"avg_line_length": 29.3,
"alnum_prop": 0.5989761092150171,
"repo_name": "codecov/pathmap",
"id": "98d9f97702c612f7a36a54afc5dcac00b5aa78b4",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pathmap/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "113"
},
{
"name": "Python",
"bytes": "29771"
}
],
"symlink_target": ""
} |
import functools
from operator import mul
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import max_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer.utils import conv_nd
import chainerx
class MaxPoolingND(pooling_nd._PoolingND):

    """Max pooling over a set of N-dimensional planes.

    .. warning::

        This feature is experimental. The interface can change in the future.

    """

    def __init__(self, ndim, ksize, stride=None, pad=0, cover_all=True,
                 return_indices=False):
        # All configuration is handled by the _PoolingND base class.
        super(MaxPoolingND, self).__init__(
            ndim, ksize, stride=stride, pad=pad, cover_all=cover_all,
            return_indices=return_indices)

    def forward_chainerx(self, x):
        # Fall back to the regular Chainer paths whenever ChainerX cannot
        # handle the configuration.
        # TODO(sonots): Support return_indices in ChainerX
        if self.return_indices:
            return chainer.Fallback
        if x[0].device.backend.name == 'cuda':
            # TODO(sonots): Support more ndim in ChainerX
            if self.ndim not in [2, 3]:
                return chainer.Fallback
        return chainerx.max_pool(x[0], self.ksize, self.stride, self.pad,
                                 self.cover_all),

    def forward_cpu(self, x):
        # Remember input metadata for the backward pass.
        self._in_shape = x[0].shape
        self._in_dtype = x[0].dtype
        # im2col with -inf padding so that padded positions never win the max.
        col = conv_nd.im2col_nd_cpu(
            x[0], self.ksize, self.stride, self.pad, pval=-float('inf'),
            cover_all=self.cover_all)
        n, c = col.shape[:2]
        # col has shape (n, c, k_1, ..., k_N, out_1, ..., out_N); `mid` is
        # the boundary between the kernel axes and the output axes.
        mid = (len(col.shape) - 2) // 2 + 2
        ksize = col.shape[2:mid]
        outs = col.shape[mid:]
        # (n, c, k_1 * k_2 * ... * k_N, out_1, out_2, ..., out_N)
        col_shape = (n, c) + (functools.reduce(mul, ksize),) + outs
        col = col.reshape(col_shape)
        # We select maximum twice, since the implementation using numpy.choose
        # hits its bug when kh * kw >= 32.
        # argmax over the flattened kernel axis is kept for the backward pass.
        self.indexes = col.argmax(axis=2)
        y = col.max(axis=2)
        return y,

    def forward_gpu(self, x):
        if chainer.should_use_cudnn('>=auto') and 2 <= self.ndim <= 3:
            # With cuDNN v3 or greater, use cuDNN implementation for inputs
            # with spatial dimensions of two or more.
            return super(MaxPoolingND, self).forward_gpu(x)
        # Non-cuDNN path: raw elementwise kernel that also records the
        # argmax indexes for the backward pass.
        self._in_shape = x[0].shape
        self._in_dtype = x[0].dtype
        n, c = x[0].shape[:2]
        dims = x[0].shape[2:]
        ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
                   for (d, k, s, p) in six.moves.zip(
                       dims, self.ksize, self.stride, self.pad))
        # (n, c, y_1, y_2, ..., y_N)
        y_shape = (n, c) + ys
        y = cuda.cupy.empty(y_shape, dtype=x[0].dtype)
        self.indexes = cuda.cupy.empty(y_shape, dtype=numpy.int32)
        in_params, out_params, operation, name = \
            max_pooling_nd_kernel.MaxPoolingNDKernelForward.generate(self.ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            x[0].reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad +
              (y, self.indexes)))
        return y,

    def backward(self, indexes, gy):
        return MaxPoolingNDGrad(self).apply(gy)

    def _get_pool_mode(self):
        # Pooling mode passed to cuDNN by the base class.
        return cuda.cuda.cudnn.CUDNN_POOLING_MAX
class MaxPoolingNDGrad(function_node.FunctionNode):

    """Gradient of :class:`MaxPoolingND` (routes gy to the argmax positions)."""

    def __init__(self, mpoolnd):
        # Copy the configuration of the forward function node.
        self.ndim = mpoolnd.ndim
        self.ksize = mpoolnd.ksize
        self.stride = mpoolnd.stride
        self.pad = mpoolnd.pad
        self.cover_all = mpoolnd.cover_all
        self._used_cudnn = mpoolnd._used_cudnn
        if not self._used_cudnn:
            # Only the non-cuDNN forward paths recorded argmax indexes and
            # the input shape/dtype.
            self.indexes = mpoolnd.indexes
            self._in_shape = mpoolnd._in_shape
            self._in_dtype = mpoolnd._in_dtype
        self.mpoolnd = mpoolnd

    def forward_cpu(self, gy):
        ndim = self.ndim
        n, c = gy[0].shape[:2]
        outs = gy[0].shape[2:]
        dims = self._in_shape[2:]
        prod_outs = functools.reduce(mul, outs)
        prod_ksize = functools.reduce(mul, self.ksize)
        # Flat zero buffer with one kernel-sized slot group per output
        # location; only the winning (argmax) slot receives a gradient.
        gcol = numpy.zeros(
            n * c * prod_outs * prod_ksize, dtype=self._in_dtype)
        # Offset each per-location argmax by its slot group's base address,
        # then scatter the gradients in one vectorized assignment.
        indexes = self.indexes.flatten()
        indexes += numpy.arange(0, indexes.size * prod_ksize, prod_ksize)
        gcol[indexes] = gy[0].ravel()
        gcol_shape = (n, c) + outs + self.ksize
        gcol = gcol.reshape(gcol_shape)
        # Swap the output axes and kernel axes so the layout matches the
        # (n, c, k_1..k_N, out_1..out_N) shape produced by im2col.
        for i in six.moves.range(ndim):
            gcol = numpy.swapaxes(gcol, 2 + i, ndim + 2 + i)
        gx = conv_nd.col2im_nd_cpu(gcol, self.stride, self.pad, dims)
        return gx,

    def forward_gpu(self, gy):
        if self._used_cudnn:
            # The forward pass went through cuDNN; delegate to its backward.
            x, = self.mpoolnd._cudnn_inputs
            return self.mpoolnd.backward_gpu((x,), gy)
        n, c = self._in_shape[:2]
        dims = self._in_shape[2:]
        ys = gy[0].shape[2:]
        gx = cuda.cupy.empty(self._in_shape, self._in_dtype)
        ndim = self.ndim
        in_params, out_params, operation, name = \
            max_pooling_nd_kernel.MaxPoolingNDKernelBackward.generate(ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            gy[0].reduced_view(), self.indexes.reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad + (gx,)))
        return gx,

    def backward(self, indexes, ggx):
        # Double backward: pool ggx with the saved argmax indexes.
        return MaxPoolingNDWithIndexes(self.mpoolnd).apply(ggx)
class MaxPoolingNDWithIndexes(function_node.FunctionNode):

    """Pooling that reuses a previous forward's argmax selection.

    Applied by :meth:`MaxPoolingNDGrad.backward` (double backpropagation):
    instead of recomputing the maximum, each output picks the element at the
    index that won the original forward pass.
    """

    def __init__(self, mpoolnd):
        # Copy the configuration of the original forward function node.
        self.ndim = mpoolnd.ndim
        self.ksize = mpoolnd.ksize
        self.stride = mpoolnd.stride
        self.pad = mpoolnd.pad
        self.cover_all = mpoolnd.cover_all
        self._used_cudnn = mpoolnd._used_cudnn
        if not self._used_cudnn:
            self.indexes = mpoolnd.indexes
        else:
            # cuDNN did not expose indexes; keep the node to recompute them.
            self.mpoolnd = mpoolnd

    def forward_cpu(self, x):
        # Same im2col layout as MaxPoolingND.forward_cpu (-inf padding).
        col = conv_nd.im2col_nd_cpu(
            x[0], self.ksize, self.stride, self.pad, pval=-float('inf'),
            cover_all=self.cover_all)
        n, c = col.shape[:2]
        mid = (len(col.shape) - 2) // 2 + 2
        ksize = col.shape[2:mid]
        outs = col.shape[mid:]
        # (n, c, k_1 * k_2 * ... * k_N, out_1, out_2, ..., out_N)
        ksize_total = functools.reduce(mul, ksize)
        col_shape = (n, c) + (ksize_total,) + outs
        col = col.reshape(col_shape)
        # (n, c, out_1, ..., out_N, k_1 * .. * k_N)
        col_indexes = (0, 1) + tuple(six.moves.range(3, 3 + self.ndim)) + (2,)
        col = col.transpose(col_indexes)
        col = col.reshape(-1, ksize_total)
        # Pick, for every output location, the element at the saved argmax.
        indexes = self.indexes.ravel()
        col = col[numpy.arange(len(indexes)), indexes]
        return col.reshape((n, c) + outs),

    def forward_gpu(self, inputs):
        if self._used_cudnn:
            # No saved indexes: recompute them from the original input.
            x, = self.mpoolnd._cudnn_inputs
            return self._forward_gpu_compute_indexes_again((x, inputs[0]))
        x, = inputs
        self._in_shape = x.shape
        self._in_dtype = x.dtype
        n, c = x.shape[:2]
        dims = x.shape[2:]
        ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
                   for (d, k, s, p) in six.moves.zip(
                       dims, self.ksize, self.stride, self.pad))
        # (n, c, y_1, y_2, ..., y_N)
        y_shape = (n, c) + ys
        y = cuda.cupy.empty(y_shape, dtype=x.dtype)
        cls = max_pooling_nd_kernel.MaxPoolingNDKernelForwardWithIndexes
        in_params, out_params, operation, name = cls.generate(self.ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            x.reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad +
              (self.indexes.reduced_view(), y)))
        return y,

    def _forward_gpu_compute_indexes_again(self, inputs):
        # Variant kernel: derives the argmax from x, then gathers from ggx.
        x, ggx = inputs
        self._in_shape = x.shape
        self._in_dtype = x.dtype
        n, c = x.shape[:2]
        dims = x.shape[2:]
        ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
                   for (d, k, s, p) in six.moves.zip(
                       dims, self.ksize, self.stride, self.pad))
        # (n, c, y_1, y_2, ..., y_N)
        y_shape = (n, c) + ys
        y = cuda.cupy.empty(y_shape, dtype=x.dtype)
        cls = max_pooling_nd_kernel.MaxPoolingNDKernelForwardWithIndexes1
        in_params, out_params, operation, name = cls.generate(self.ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            x.reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad +
              (ggx.reduced_view(), y)))
        return y,
def max_pooling_nd(x, ksize, stride=None, pad=0, cover_all=True,
                   return_indices=False):
    """N-dimensionally spatial max pooling function.

    .. warning::

        This feature is experimental. The interface can change in the future.

    This function provides a N-dimensionally generalized version of
    :func:`~chainer.functions.max_pooling_2d`. This acts similarly to
    :func:`~chainer.functions.convolution_nd`, but it computes the maximum of
    input spatial patch for each channel without any parameter instead of
    computing the inner products.

    Args:
        x (~chainer.Variable): Input variable.
        ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
            ``ksize=(k, k, ..., k)`` are equivalent.
        stride (int or tuple of ints or None): Stride of pooling applications.
            ``stride=s`` and ``stride=(s,s, ..., s)`` are equivalent. If
            ``None`` is specified, then it uses same stride as the pooling
            window size.
        pad (int or tuple of ints): Spatial padding width for the input array.
            ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
        cover_all (bool): If ``True``, all spatial locations are pooled into
            some output pixels. It may make the output size larger.
        return_indices (bool): If ``True``, pooling indices array is returned
            together with the output variable. The returned indices are
            expected for use by :func:`chainer.functions.upsampling_nd`.
            Note that cuDNN will not be used for this function if
            ``return_indices`` is set to ``True``, as cuDNN does not return
            indices information.

    Returns:
        ~chainer.Variable or tuple:
            When ``return_indices`` is ``False`` (default), returns the output
            variable.
            When ``True``, returns the tuple of the output variable and
            pooling indices (`ndarray`). Pooling indices will be on the same
            device as the input.

    """
    # Spatial rank is everything after the batch and channel axes.
    ndim = len(x.shape[2:])
    func = MaxPoolingND(ndim, ksize, stride, pad, cover_all, return_indices)
    if not return_indices:
        return func.apply((x,))[0]
    # Indices are only tracked by the non-cuDNN implementations, so force
    # cuDNN off for this application.
    with chainer.using_config('use_cudnn', 'never'):
        out = func.apply((x,))[0]
    return out, func.indexes
def max_pooling_1d(x, ksize, stride=None, pad=0, cover_all=True,
                   return_indices=False):
    """1-dimensional spatial max pooling function.

    .. warning::

        This feature is experimental. The interface can change in the future.

    .. note::

        This function calls :func:`~chainer.functions.max_pooling_nd`
        internally, so see the details of the behavior in
        the documentation of :func:`~chainer.functions.max_pooling_nd`.

    """
    ndim = len(x.shape[2:])
    if ndim != 1:
        raise ValueError(
            'The number of dimensions under channel dimension of the input '
            '\'x\' should be 1. But the actual ndim was {}.'.format(ndim))
    return max_pooling_nd(x, ksize, stride, pad, cover_all, return_indices)
def max_pooling_3d(x, ksize, stride=None, pad=0, cover_all=True,
                   return_indices=False):
    """3-dimensional spatial max pooling function.

    .. warning::

        This feature is experimental. The interface can change in the future.

    .. note::

        This function calls :func:`~chainer.functions.max_pooling_nd`
        internally, so see the details of the behavior in
        the documentation of :func:`~chainer.functions.max_pooling_nd`.

    """
    ndim = len(x.shape[2:])
    if ndim != 3:
        raise ValueError(
            'The number of dimensions under channel dimension of the input '
            '\'x\' should be 3. But the actual ndim was {}.'.format(ndim))
    return max_pooling_nd(x, ksize, stride, pad, cover_all, return_indices)
| {
"content_hash": "3eec6a8da1bdfd19658c86fab80b6428",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 79,
"avg_line_length": 37.02647058823529,
"alnum_prop": 0.5792358408134085,
"repo_name": "jnishi/chainer",
"id": "f4b757a169fd61120e2fe39bfa9b28d185b59b10",
"size": "12589",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chainer/functions/pooling/max_pooling_nd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1460543"
},
{
"name": "CMake",
"bytes": "42279"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1457"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5121452"
},
{
"name": "Shell",
"bytes": "22130"
}
],
"symlink_target": ""
} |
import random
#
# A simple name generator
#
female_names = ["Sarah", "Abby", "Kate", "Caitlin", "Kim", "Allison", "Sky"]
male_names = ["Mark", "Bob", "John", "Sean", "Jake", "Jerry", "Matthew"]


def human(gender):
    """Return a random human name: male for gender == "M", female otherwise.

    Bug fix: the original used `gender is "M"`, an identity comparison with
    a string literal (implementation-dependent; SyntaxWarning since
    Python 3.8).  Equality is the correct test.
    """
    if gender == "M":
        return random.choice(male_names)
    return random.choice(female_names)
"content_hash": "0cfb5a3f9b775f399d6fb2ab244eccbf",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 76,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6480263157894737,
"repo_name": "SkylarKelty/AI",
"id": "4742efdd0b958b004fe6f284ca94c4a23c8744c9",
"size": "304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/util/namegen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15092"
}
],
"symlink_target": ""
} |
import sys, os
__here__ = os.path.abspath(os.path.dirname(__file__))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ham'
copyright = u'2013, Frank Tobia'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Load __version__ from ham/_version.py without importing the package.
# Fix: the original exec(open(...).read()) never closed the file handle;
# use a context manager.
_version_path = os.path.join(__here__, '..', '..', 'ham', '_version.py')
with open(_version_path) as _version_file:
    exec(_version_file.read())
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Hamdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Ham.tex', u'Ham Documentation',
u'Frank Tobia', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ham', u'Ham Documentation',
[u'Frank Tobia'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Ham', u'Ham Documentation',
u'Frank Tobia', 'Ham', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "57fbd4eb290a8ae9293c66c6450caa65",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 148,
"avg_line_length": 32.49576271186441,
"alnum_prop": 0.6987873255965575,
"repo_name": "ftobia/ham",
"id": "c81d0d9bf471cf7818ef26bf11fcd4b1348aa028",
"size": "8083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27544"
},
{
"name": "Shell",
"bytes": "5099"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os.path
from six.moves.urllib.parse import urlencode
from os.path import join, dirname
from sentry.testutils import AcceptanceTestCase
# (debug-endpoint path, human-readable label) pairs.  The label doubles as
# the snapshot name and, via read_txt_email_fixture(), the text-fixture
# filename ("sso linked" -> "sso_linked.txt").
EMAILS = (
    ('/debug/mail/assigned/', 'assigned'),
    ('/debug/mail/assigned/self/', 'assigned self'),
    ('/debug/mail/note/', 'note'),
    ('/debug/mail/regression/', 'regression'),
    ('/debug/mail/regression/release/', 'regression with version'),
    ('/debug/mail/new-release/', 'release'),
    ('/debug/mail/resolved/', 'resolved'),
    ('/debug/mail/resolved-in-release/', 'resolved in release'),
    ('/debug/mail/resolved-in-release/upcoming/', 'resolved in release upcoming'),
    ('/debug/mail/unassigned/', 'unassigned'),
    ('/debug/mail/unable-to-fetch-commits/', 'unable to fetch commits'),
    ('/debug/mail/unable-to-delete-repo/', 'unable to delete repo'),
    ('/debug/mail/alert/', 'alert'),
    ('/debug/mail/digest/', 'digest'),
    ('/debug/mail/invalid-identity/', 'invalid identity'),
    ('/debug/mail/invitation/', 'invitation'),
    ('/debug/mail/report/', 'report'),
    ('/debug/mail/mfa-added/', 'mfa added'),
    ('/debug/mail/mfa-removed/', 'mfa removed'),
    ('/debug/mail/recovery-codes-regenerated/', 'recovery codes regenerated'),
    ('/debug/mail/password-changed/', 'password changed'),
    ('/debug/mail/sso-linked', 'sso linked'),
    ('/debug/mail/sso-unlinked', 'sso unlinked'),
    ('/debug/mail/sso-unlinked/no-password', 'sso unlinked without password'),
)
def read_txt_email_fixture(name):
    """Load the plain-text email fixture for *name*.

    "sso unlinked without password" -> fixtures/emails/sso_unlinked_without_password.txt
    """
    filename = '%s.txt' % name.replace(' ', '_')
    path = join(dirname(__file__), os.pardir, 'fixtures', 'emails', filename)
    with open(path, 'r') as f:
        return f.read()
class EmailTestCase(AcceptanceTestCase):
    """Acceptance coverage for the /debug/mail/ preview endpoints.

    HTML renderings are captured as visual snapshots; plain-text
    renderings are compared against checked-in fixture files.
    """

    def setUp(self):
        super(EmailTestCase, self).setUp()
        self.user = self.create_user('foo@example.com')
        self.login_as(self.user)

    def build_url(self, path, format='html'):
        # Fixed seed keeps the debug endpoints deterministic between runs.
        query = urlencode({
            'format': format,
            'seed': '123',
        })
        return u'{}?{}'.format(path, query)

    def test_emails(self):
        for path, label in EMAILS:
            # HTML output is captured as a snapshot
            self.browser.get(self.build_url(path, 'html'))
            self.browser.wait_until('#preview')
            self.browser.snapshot('{} email html'.format(label))

            # Text output is asserted against static fixture files
            self.browser.get(self.build_url(path, 'txt'))
            self.browser.wait_until('#preview')
            rendered = self.browser.find_element_by_css_selector('#preview pre')
            actual = rendered.get_attribute('innerHTML')
            expected = read_txt_email_fixture(label)
            assert expected == actual
| {
"content_hash": "3aae0d747d02440e0dd91734a55771b4",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 82,
"avg_line_length": 37.12658227848101,
"alnum_prop": 0.6096147289464712,
"repo_name": "looker/sentry",
"id": "5be4b63bfe7bfc55d8b080cf718b144d7c5f7280",
"size": "2933",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/acceptance/test_emails.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
from quark_runtime import *
import slack
import pkg
# Entry-point shim: forward the command-line arguments (minus argv[0]) to
# main().  NOTE: generated code (quark compiler emit test fixture); the
# one-line style is the generator's output.
def call_main(): import sys; main(_List(sys.argv[1:]))
def main(args):
    # NOTE: generated code (quark compiler emit test fixture); trailing
    # semicolons and redundant parentheses mirror the generator's output.
    # Drives the fake Slack client through a hello and a message event.
    cli = slack.Client(None, u"fake-token", pkg.Handler());
    (cli).onWSMessage(None, u"{\"type\": \"hello\"}");
    (cli).onWSMessage(None, u"{\"type\": \"message\", \"user\": \"uid-1\", \"channel\": \"chanel-1\"}");
| {
"content_hash": "f89781d44ad3880f0abdcab2ff240de0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 104,
"avg_line_length": 31.454545454545453,
"alnum_prop": 0.5982658959537572,
"repo_name": "bozzzzo/quark",
"id": "be71f08459317c874a955356ce0f4c573eb85398",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quarkc/test/emit/expected/py/slackpack/slackpack/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "496221"
},
{
"name": "JavaScript",
"bytes": "466971"
},
{
"name": "Python",
"bytes": "590150"
},
{
"name": "Shell",
"bytes": "1328"
}
],
"symlink_target": ""
} |
"""DSA public-key signature algorithm."""
__revision__ = "$Id$"
__all__ = ['generate', 'construct', 'error']
from Crypto.Util.python_compat import *
from Crypto.PublicKey import _DSA, _slowmath, pubkey
from Crypto import Random
try:
from Crypto.PublicKey import _fastmath
except ImportError:
_fastmath = None
class _DSAobj(pubkey.pubkey):
    """DSA key object wrapping a low-level math-backend key.

    Delegates signing/verification to the backend key object (from
    _fastmath or _slowmath).  DSA is a signature-only algorithm:
    encryption, decryption and blinding all raise TypeError.
    """
    # Key components in pickling order; 'x' (the private exponent) is
    # last so a public key's state is simply the first four entries.
    keydata = ['y', 'g', 'p', 'q', 'x']

    def __init__(self, implementation, key):
        self.implementation = implementation
        self.key = key

    def __getattr__(self, attrname):
        if attrname in self.keydata:
            # For backward compatibility, allow the user to get (not set) the
            # DSA key parameters directly from this object.
            return getattr(self.key, attrname)
        else:
            raise AttributeError("%s object has no %r attribute" % (self.__class__.__name__, attrname,))

    def _encrypt(self, c, K):
        raise TypeError("DSA cannot encrypt")

    def _decrypt(self, c):
        raise TypeError("DSA cannot decrypt")

    def _blind(self, m, r):
        raise TypeError("DSA cannot blind")

    def _unblind(self, m, r):
        raise TypeError("DSA cannot unblind")

    def _sign(self, m, k):
        return self.key._sign(m, k)

    def _verify(self, m, sig):
        (r, s) = sig
        return self.key._verify(m, r, s)

    def has_private(self):
        return self.key.has_private()

    def size(self):
        return self.key.size()

    def can_blind(self):
        return False

    def can_encrypt(self):
        return False

    def can_sign(self):
        return True

    def publickey(self):
        """Return a new key object holding only the public components."""
        return self.implementation.construct((self.key.y, self.key.g, self.key.p, self.key.q))

    def __getstate__(self):
        # Collect whichever key components exist; a public key simply has
        # no 'x', so that entry is skipped.
        d = {}
        for k in self.keydata:
            try:
                d[k] = getattr(self.key, k)
            except AttributeError:
                pass
        return d

    def __setstate__(self, d):
        # Unpickling may bypass __init__, so restore a default
        # implementation if it is missing.
        if not hasattr(self, 'implementation'):
            self.implementation = DSAImplementation()
        t = []
        for k in self.keydata:
            # FIX: was "if not d.has_key(k)"; dict.has_key() is
            # Python-2-only and was removed in Python 3.  The "in"
            # operator is equivalent and works on both versions.
            if k not in d:
                break
            t.append(d[k])
        self.key = self.implementation._math.dsa_construct(*tuple(t))

    def __repr__(self):
        attrs = []
        for k in self.keydata:
            if k == 'p':
                # Report the modulus size in bits (size() is bits - 1).
                attrs.append("p(%d)" % (self.size()+1,))
            elif hasattr(self.key, k):
                attrs.append(k)
        if self.has_private():
            attrs.append("private")
        return "<%s @0x%x %s>" % (self.__class__.__name__, id(self), ",".join(attrs))
class DSAImplementation(object):
    """Factory for DSA key objects.

    Chooses the fast (C) or slow (pure Python) math backend and exposes
    generate()/construct() plus the backend's ``error`` exception.
    """

    def __init__(self, **kwargs):
        """Create the implementation.

        Keyword arguments:
        use_fast_math --
            None (default) - Use fast math if available; use slow math if not.
            True  - Use fast math, and raise RuntimeError if it's not available.
            False - Use slow math.
        default_randfunc --
            None (default) - use Random.new().read
            not None - use the specified function
        """
        use_fast_math = kwargs.get('use_fast_math', None)
        if use_fast_math is None:       # Automatic
            if _fastmath is not None:
                self._math = _fastmath
            else:
                self._math = _slowmath
        elif use_fast_math:             # Explicitly select fast math
            if _fastmath is not None:
                self._math = _fastmath
            else:
                raise RuntimeError("fast math module not available")
        else:                           # Explicitly select slow math
            self._math = _slowmath
        self.error = self._math.error

        # NOTE(review): _default_randfunc is stored but never consulted by
        # _get_randfunc below -- preserved as-is to avoid changing behavior;
        # confirm whether it was ever meant to seed _current_randfunc.
        self._default_randfunc = kwargs.get('default_randfunc', None)
        self._current_randfunc = None

    def _get_randfunc(self, randfunc):
        # Prefer an explicit randfunc; otherwise lazily create (and cache)
        # the default OS RNG reader.
        if randfunc is not None:
            return randfunc
        elif self._current_randfunc is None:
            self._current_randfunc = Random.new().read
        return self._current_randfunc

    def generate(self, bits, randfunc=None, progress_func=None):
        """Generate a fresh DSA key pair.

        ``bits`` is the size of the prime p and, per FIPS 186-2, must be
        a multiple of 64 between 512 and 1024 inclusive; ValueError is
        raised otherwise.
        """
        # Equivalent to the original enumeration of 512 + 64*i for
        # i in 0..8, expressed arithmetically.
        if 512 <= bits <= 1024 and bits % 64 == 0:
            return self._generate(bits, randfunc, progress_func)
        # The March 2006 draft of FIPS 186-3 also allows 2048 and 3072-bit
        # primes, but only with longer q values.  Since the current DSA
        # implementation only supports a 160-bit q, we don't support larger
        # values.
        raise ValueError("Number of bits in p must be a multiple of 64 between 512 and 1024, not %d bits" % (bits,))

    def _generate(self, bits, randfunc=None, progress_func=None):
        # Delegate the heavy lifting to the legacy parameter generator,
        # then wrap the backend key object.
        rf = self._get_randfunc(randfunc)
        obj = _DSA.generate_py(bits, rf, progress_func)    # TODO: Don't use legacy _DSA module
        key = self._math.dsa_construct(obj.y, obj.g, obj.p, obj.q, obj.x)
        return _DSAobj(self, key)

    def construct(self, tup):
        """Construct a DSA key from a tuple (y, g, p, q) or (y, g, p, q, x)."""
        key = self._math.dsa_construct(*tup)
        return _DSAobj(self, key)
# Module-level singleton: the public generate/construct functions and the
# error exception are bound from a default DSAImplementation instance.
_impl = DSAImplementation()
generate = _impl.generate
construct = _impl.construct
error = _impl.error

# vim:set ts=4 sw=4 sts=4 expandtab:
| {
"content_hash": "c8af80a788be9bbeca6b8365872b41d3",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 116,
"avg_line_length": 32.157575757575756,
"alnum_prop": 0.5740670938560121,
"repo_name": "cipicip/appengine",
"id": "6349cef8cccbc2ab45a51a8ac981317dd86318e4",
"size": "6405",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "test_crypto/Crypto/PublicKey/DSA.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "HTML",
"bytes": "18890"
},
{
"name": "JavaScript",
"bytes": "100184"
},
{
"name": "Python",
"bytes": "541939"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Populate the admin site by discovering admin.py modules in installed apps.
admin.autodiscover()

# NOTE: patterns() is the legacy (pre-Django-1.8) URLconf style this
# project's Django version uses.  The catch-all '^' prefix must stay last.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'project.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('lamenews.urls')),
)
| {
"content_hash": "5ec0471058ac0623230dae101ea01c65",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 26.076923076923077,
"alnum_prop": 0.640117994100295,
"repo_name": "aliva/lamenews",
"id": "5bfea91be4443cb314515482985c547fbeec127d",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1359"
},
{
"name": "JavaScript",
"bytes": "690"
},
{
"name": "Python",
"bytes": "58959"
}
],
"symlink_target": ""
} |
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.utilities import abstractMethod
from evolvable import Evolvable
from pybrain.structure.parametercontainer import ParameterContainer
class TopologyEvolvable(ParameterContainer):
    """ An evolvable object, with higher-level mutations,
    that change the topology (in the broadest sense).
    It contains an instance of ParameterContainer. """

    # The wrapped ParameterContainer holding the low-level parameters.
    pcontainer = None

    def __init__(self, pcontainer, **args):
        self.setArgs(**args)
        self.pcontainer = pcontainer

    @property
    def params(self):
        # Delegate low-level parameter access to the wrapped container.
        return self.pcontainer.params

    def _setParameters(self, x):
        self.pcontainer._setParameters(x)

    def topologyMutate(self):
        # Subclasses must implement a topology-changing mutation.
        abstractMethod()

    def newSimilarInstance(self):
        """ generate a new Evolvable with the same topology """
        # Copy (preserving topology), then re-randomize the parameters.
        res = self.copy()
        res.randomize()
        return res
def copy(self):
""" copy everything, except the pcontainer """
# CHECKME: is this correct, or might it be misleading?
tmp = self.pcontainer
self.pcontainer = None
cp = Evolvable.copy(self)
cp.pcontainer = tmp
self.pcontainer = tmp
return cp | {
"content_hash": "f4b75f0dde2ac6b312dda3c9209c7104",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 67,
"avg_line_length": 29.13953488372093,
"alnum_prop": 0.6384676775738228,
"repo_name": "rbalda/neural_ocr",
"id": "9b93ecb800a461bd83df4a5e2860f01821bc0d4c",
"size": "1253",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/pybrain/structure/evolvables/topology.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "497604"
},
{
"name": "C++",
"bytes": "3309990"
},
{
"name": "CSS",
"bytes": "135235"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "215390"
},
{
"name": "JavaScript",
"bytes": "206780"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "26980034"
},
{
"name": "Shell",
"bytes": "3895"
}
],
"symlink_target": ""
} |
"""party_affiliation table
Revision ID: 443b3b630b0f
Revises: df3415c2347
Create Date: 2014-05-31 02:22:34.753596
"""
# revision identifiers, used by Alembic.
# Migration chain: df3415c2347 -> 443b3b630b0f.
revision = '443b3b630b0f'
down_revision = 'df3415c2347'

from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the party_affiliation table and its three lookup indexes."""
    op.create_table(
        'party_affiliation',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('person_id', sa.Integer(), nullable=False),
        sa.Column('party_id', sa.Integer(), nullable=False),
        sa.Column('date', sa.Date(), nullable=False),
        sa.ForeignKeyConstraint(['party_id'], ['party.id'], ),
        sa.ForeignKeyConstraint(['person_id'], ['person.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # One non-unique index per lookup column.
    for column in ('date', 'party_id', 'person_id'):
        op.create_index(op.f('ix_party_affiliation_' + column),
                        'party_affiliation', [column], unique=False)
def downgrade():
    """Drop the party_affiliation table after removing its indexes."""
    # Reverse of upgrade(): indexes first, then the table itself.
    for column in ('person_id', 'party_id', 'date'):
        op.drop_index(op.f('ix_party_affiliation_' + column),
                      table_name='party_affiliation')
    op.drop_table('party_affiliation')
| {
"content_hash": "9d87f7704602f05fc7a408a0c5bd344c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 109,
"avg_line_length": 36.24324324324324,
"alnum_prop": 0.6935123042505593,
"repo_name": "teampopong/pokr.kr",
"id": "bb7c4e149ab47cc42140037433e89acac5abc03e",
"size": "1341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/443b3b630b0f_party_affiliation_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "216253"
},
{
"name": "HTML",
"bytes": "146548"
},
{
"name": "JavaScript",
"bytes": "143812"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "191129"
},
{
"name": "Shell",
"bytes": "737"
}
],
"symlink_target": ""
} |
"""SCons.Tool
SCons tool selection.
This looks for modules that define a callable object that can modify
a construction environment as appropriate for a given tool (or tool
chain).
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "tool specification" in an arbitrary callable function. No
one needs to use or tie in to this subsystem in order to roll their own
tool definition.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/__init__.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import imp
import sys
import SCons.Builder
import SCons.Errors
import SCons.Node.FS
import SCons.Scanner
import SCons.Scanner.C
import SCons.Scanner.D
import SCons.Scanner.LaTeX
import SCons.Scanner.Prog
# Extra directories (beyond the SCons.Tool package itself) that are searched
# for tool modules; extended by users via Tool()'s toolpath argument.
DefaultToolpath=[]

# Shared scanner instances, reused by the builder-creation helpers below.
CScanner = SCons.Scanner.C.CScanner()
DScanner = SCons.Scanner.D.DScanner()
LaTeXScanner = SCons.Scanner.LaTeX.LaTeXScanner()
PDFLaTeXScanner = SCons.Scanner.LaTeX.PDFLaTeXScanner()
ProgramScanner = SCons.Scanner.Prog.ProgramScanner()
SourceFileScanner = SCons.Scanner.Base({}, name='SourceFileScanner')

# Source suffixes dispatched to each language scanner via SourceFileScanner.
CSuffixes = [".c", ".C", ".cxx", ".cpp", ".c++", ".cc",
             ".h", ".H", ".hxx", ".hpp", ".hh",
             ".F", ".fpp", ".FPP",
             ".m", ".mm",
             ".S", ".spp", ".SPP", ".sx"]

DSuffixes = ['.d']

IDLSuffixes = [".idl", ".IDL"]

LaTeXSuffixes = [".tex", ".ltx", ".latex"]

for suffix in CSuffixes:
    SourceFileScanner.add_scanner(suffix, CScanner)

for suffix in DSuffixes:
    SourceFileScanner.add_scanner(suffix, DScanner)

# FIXME: what should be done here? Two scanners scan the same extensions,
# but look for different files, e.g., "picture.eps" vs. "picture.pdf".
# The builders for DVI and PDF explicitly reference their scanners
# I think that means this is not needed???
for suffix in LaTeXSuffixes:
    SourceFileScanner.add_scanner(suffix, LaTeXScanner)
    SourceFileScanner.add_scanner(suffix, PDFLaTeXScanner)
class Tool(object):
    """Wrapper for a tool-specification module.

    Loads the named tool module (searching toolpath directories, zip
    archives on the toolpath, and finally the SCons.Tool package) and
    exposes its generate()/exists() callables.  Calling the instance
    applies the tool to a construction environment.
    """
    def __init__(self, name, toolpath=[], **kw):
        self.name = name
        self.toolpath = toolpath + DefaultToolpath
        # remember these so we can merge them into the call
        self.init_kw = kw

        # Resolve the tool module eagerly; raises EnvironmentError if no
        # module of this name can be located anywhere.
        module = self._tool_module()
        self.generate = module.generate
        self.exists = module.exists
        if hasattr(module, 'options'):
            self.options = module.options

    def _tool_module(self):
        """Locate and import the tool module for self.name.

        Search order: (1) the toolpath directories via imp, (2) zip
        archives on the toolpath, (3) an already-imported
        SCons.Tool.<name> module, (4) the SCons.Tool package directory,
        (5) a zipped SCons.Tool package.  Raises
        SCons.Errors.EnvironmentError when every fallback fails.
        """
        # TODO: Interchange zipimport with normal initilization for better error reporting
        oldpythonpath = sys.path
        sys.path = self.toolpath + sys.path
        try:
            try:
                file, path, desc = imp.find_module(self.name, self.toolpath)
                try:
                    return imp.load_module(self.name, file, path, desc)
                finally:
                    if file:
                        file.close()
            except ImportError, e:
                # Any ImportError other than "module not found" is a real
                # failure inside the tool module itself -- surface it.
                if str(e)!="No module named %s"%self.name:
                    raise SCons.Errors.EnvironmentError(e)
                try:
                    import zipimport
                except ImportError:
                    pass
                else:
                    for aPath in self.toolpath:
                        try:
                            importer = zipimport.zipimporter(aPath)
                            return importer.load_module(self.name)
                        except ImportError, e:
                            pass
        finally:
            # Always restore sys.path, whether or not a module was found.
            sys.path = oldpythonpath

        full_name = 'SCons.Tool.' + self.name
        try:
            return sys.modules[full_name]
        except KeyError:
            try:
                smpath = sys.modules['SCons.Tool'].__path__
                try:
                    file, path, desc = imp.find_module(self.name, smpath)
                    module = imp.load_module(full_name, file, path, desc)
                    setattr(SCons.Tool, self.name, module)
                    if file:
                        file.close()
                    return module
                except ImportError, e:
                    if str(e)!="No module named %s"%self.name:
                        raise SCons.Errors.EnvironmentError(e)
                    # Last resort: the SCons.Tool package itself may live
                    # inside a zip archive.
                    try:
                        import zipimport
                        importer = zipimport.zipimporter( sys.modules['SCons.Tool'].__path__[0] )
                        module = importer.load_module(full_name)
                        setattr(SCons.Tool, self.name, module)
                        return module
                    except ImportError, e:
                        m = "No tool named '%s': %s" % (self.name, e)
                        raise SCons.Errors.EnvironmentError(m)
            except ImportError, e:
                m = "No tool named '%s': %s" % (self.name, e)
                raise SCons.Errors.EnvironmentError(m)

    def __call__(self, env, *args, **kw):
        """Apply this tool to the construction environment *env*."""
        if self.init_kw is not None:
            # Merge call kws into init kws;
            # but don't bash self.init_kw.
            if kw is not None:
                call_kw = kw
                kw = self.init_kw.copy()
                kw.update(call_kw)
            else:
                kw = self.init_kw
        env.Append(TOOLS = [ self.name ])
        if hasattr(self, 'options'):
            # The tool declares command-line options: register them on the
            # environment's Variables object and fold them into env.
            import SCons.Variables
            if 'options' not in env:
                from SCons.Script import ARGUMENTS
                env['options']=SCons.Variables.Variables(args=ARGUMENTS)
            opts=env['options']
            self.options(opts)
            opts.Update(env)
        self.generate(env, *args, **kw)

    def __str__(self):
        return self.name
##########################################################################
# Create common executable program / library / object builders
def createProgBuilder(env):
    """Return the Program Builder for *env*.

    Creates and registers the Builder on first use; subsequent calls
    return the already-registered instance.
    """
    builders = env['BUILDERS']
    try:
        return builders['Program']
    except KeyError:
        import SCons.Defaults
        prog = SCons.Builder.Builder(action=SCons.Defaults.LinkAction,
                                     emitter='$PROGEMITTER',
                                     prefix='$PROGPREFIX',
                                     suffix='$PROGSUFFIX',
                                     src_suffix='$OBJSUFFIX',
                                     src_builder='Object',
                                     target_scanner=ProgramScanner)
        builders['Program'] = prog
        return prog
def createStaticLibBuilder(env):
    """Return the StaticLibrary Builder for *env*.

    Creates the Builder on first use and registers it under both the
    'StaticLibrary' and 'Library' names; later calls return the
    registered instance.
    """
    try:
        return env['BUILDERS']['StaticLibrary']
    except KeyError:
        pass
    actions = [SCons.Action.Action("$ARCOM", "$ARCOMSTR")]
    if env.Detect('ranlib'):
        # Index the archive after creation when ranlib is available.
        actions.append(SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR"))
    static_lib = SCons.Builder.Builder(action=actions,
                                       emitter='$LIBEMITTER',
                                       prefix='$LIBPREFIX',
                                       suffix='$LIBSUFFIX',
                                       src_suffix='$OBJSUFFIX',
                                       src_builder='StaticObject')
    env['BUILDERS']['StaticLibrary'] = static_lib
    env['BUILDERS']['Library'] = static_lib
    return static_lib
def createSharedLibBuilder(env):
    """Return the SharedLibrary Builder for *env*.

    Creates and registers the Builder on first use; subsequent calls
    return the already-registered instance.
    """
    try:
        return env['BUILDERS']['SharedLibrary']
    except KeyError:
        import SCons.Defaults
        # SharedCheck validates the sources before the shared link runs.
        shared_lib = SCons.Builder.Builder(
            action=[SCons.Defaults.SharedCheck, SCons.Defaults.ShLinkAction],
            emitter="$SHLIBEMITTER",
            prefix='$SHLIBPREFIX',
            suffix='$SHLIBSUFFIX',
            target_scanner=ProgramScanner,
            src_suffix='$SHOBJSUFFIX',
            src_builder='SharedObject')
        env['BUILDERS']['SharedLibrary'] = shared_lib
        return shared_lib
def createLoadableModuleBuilder(env):
    """Return the LoadableModule Builder for *env*.

    Creates and registers the Builder on first use; subsequent calls
    return the already-registered instance.
    """
    try:
        return env['BUILDERS']['LoadableModule']
    except KeyError:
        import SCons.Defaults
        # SharedCheck validates the sources before the module link runs.
        ld_module = SCons.Builder.Builder(
            action=[SCons.Defaults.SharedCheck, SCons.Defaults.LdModuleLinkAction],
            emitter="$LDMODULEEMITTER",
            prefix='$LDMODULEPREFIX',
            suffix='$LDMODULESUFFIX',
            target_scanner=ProgramScanner,
            src_suffix='$SHOBJSUFFIX',
            src_builder='SharedObject')
        env['BUILDERS']['LoadableModule'] = ld_module
        return ld_module
def createObjBuilders(env):
    """Return the (StaticObject, SharedObject) Builders for *env*.

    Whichever of the two is missing is created and registered
    (StaticObject is additionally aliased as 'Object').  This is a
    separate function because so many Tools use this functionality.
    """
    builders = env['BUILDERS']
    try:
        static_obj = builders['StaticObject']
    except KeyError:
        static_obj = SCons.Builder.Builder(action={},
                                           emitter={},
                                           prefix='$OBJPREFIX',
                                           suffix='$OBJSUFFIX',
                                           src_builder=['CFile', 'CXXFile'],
                                           source_scanner=SourceFileScanner,
                                           single_source=1)
        builders['StaticObject'] = static_obj
        builders['Object'] = static_obj
    try:
        shared_obj = builders['SharedObject']
    except KeyError:
        shared_obj = SCons.Builder.Builder(action={},
                                           emitter={},
                                           prefix='$SHOBJPREFIX',
                                           suffix='$SHOBJSUFFIX',
                                           src_builder=['CFile', 'CXXFile'],
                                           source_scanner=SourceFileScanner,
                                           single_source=1)
        builders['SharedObject'] = shared_obj
    return (static_obj, shared_obj)
def createCFileBuilders(env):
    """Return the (CFile, CXXFile) Builders for *env*.

    Whichever of the two is missing is created and registered, with
    default generated-source suffixes of '.c' and '.cc' respectively.
    This is a separate function because so many Tools use this
    functionality.
    """
    builders = env['BUILDERS']
    try:
        c_file = builders['CFile']
    except KeyError:
        c_file = SCons.Builder.Builder(action={},
                                       emitter={},
                                       suffix={None: '$CFILESUFFIX'})
        builders['CFile'] = c_file
        env.SetDefault(CFILESUFFIX='.c')
    try:
        cxx_file = builders['CXXFile']
    except KeyError:
        cxx_file = SCons.Builder.Builder(action={},
                                         emitter={},
                                         suffix={None: '$CXXFILESUFFIX'})
        builders['CXXFile'] = cxx_file
        env.SetDefault(CXXFILESUFFIX='.cc')
    return (c_file, cxx_file)
##########################################################################
# Create common Java builders
def CreateJarBuilder(env):
    """Return the Jar Builder for *env*, creating and registering it on
    first use.
    """
    try:
        java_jar = env['BUILDERS']['Jar']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR')
        # FIX: src_suffix previously read '$JAVACLASSSUFIX' (missing an
        # 'F'), referencing a construction variable that is never set.
        # Corrected to '$JAVACLASSSUFFIX', matching CreateJavaHBuilder
        # and the variable the javac tool actually defines.
        java_jar = SCons.Builder.Builder(action = jar_com,
                                         suffix = '$JARSUFFIX',
                                         src_suffix = '$JAVACLASSSUFFIX',
                                         src_builder = 'JavaClassFile',
                                         source_factory = fs.Entry)
        env['BUILDERS']['Jar'] = java_jar
    return java_jar
def CreateJavaHBuilder(env):
    """Return the JavaH Builder for *env*, creating and registering it
    on first use.
    """
    try:
        return env['BUILDERS']['JavaH']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        javah_action = SCons.Action.Action('$JAVAHCOM', '$JAVAHCOMSTR')
        java_javah = SCons.Builder.Builder(action=javah_action,
                                           src_suffix='$JAVACLASSSUFFIX',
                                           target_factory=fs.Entry,
                                           source_factory=fs.File,
                                           src_builder='JavaClassFile')
        env['BUILDERS']['JavaH'] = java_javah
        return java_javah
def CreateJavaClassFileBuilder(env):
    """Return the JavaClassFile Builder for *env*, creating and
    registering it on first use.
    """
    try:
        return env['BUILDERS']['JavaClassFile']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        javac_action = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
        java_class_file = SCons.Builder.Builder(action=javac_action,
                                                emitter={},
                                                #suffix = '$JAVACLASSSUFFIX',
                                                src_suffix='$JAVASUFFIX',
                                                src_builder=['JavaFile'],
                                                target_factory=fs.Entry,
                                                source_factory=fs.File)
        env['BUILDERS']['JavaClassFile'] = java_class_file
        return java_class_file
def CreateJavaClassDirBuilder(env):
    """Return the JavaClassDir Builder for *env*, creating and
    registering it on first use.
    """
    try:
        return env['BUILDERS']['JavaClassDir']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        javac_action = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
        java_class_dir = SCons.Builder.Builder(action=javac_action,
                                               emitter={},
                                               target_factory=fs.Dir,
                                               source_factory=fs.Dir)
        env['BUILDERS']['JavaClassDir'] = java_class_dir
        return java_class_dir
def CreateJavaFileBuilder(env):
    """Return the JavaFile Builder for *env*, creating and registering
    it on first use (and defaulting JAVASUFFIX to '.java').
    """
    try:
        return env['BUILDERS']['JavaFile']
    except KeyError:
        java_file = SCons.Builder.Builder(action={},
                                          emitter={},
                                          suffix={None: '$JAVASUFFIX'})
        env['BUILDERS']['JavaFile'] = java_file
        env['JAVASUFFIX'] = '.java'
        return java_file
class ToolInitializerMethod(object):
    """
    This is added to a construction environment in place of a
    method(s) normally called for a Builder (env.Object, env.StaticObject,
    etc.).  When called, it has its associated ToolInitializer
    object search the specified list of tools and apply the first
    one that exists to the construction environment.  It then calls
    whatever builder was (presumably) added to the construction
    environment in place of this particular instance.
    """
    def __init__(self, name, initializer):
        """
        Note:  we store the tool name as __name__ so it can be used by
        the class that attaches this to a construction environment.
        """
        self.__name__ = name
        self.initializer = initializer

    def get_builder(self, env):
        """
        Returns the appropriate real Builder for this method name
        after having the associated ToolInitializer object apply
        the appropriate Tool module, or None if no Tool replaced
        this placeholder with a real Builder.
        """
        # FIX: removed a redundant getattr(env, self.__name__) call whose
        # result was discarded before apply_tools() ran; only the
        # post-apply lookup below is meaningful.
        self.initializer.apply_tools(env)
        builder = getattr(env, self.__name__)
        if builder is self:
            # There was no Builder added, which means no valid Tool
            # for this name was found (or possibly there's a mismatch
            # between the name we were called by and the Builder name
            # added by the Tool module).
            return None
        # A real Builder is in place; drop the placeholder methods so
        # they aren't copied when the environment is cloned.
        self.initializer.remove_methods(env)
        return builder

    def __call__(self, env, *args, **kw):
        """Resolve the deferred Tool, then forward the call to the real
        Builder.  Returns an empty ([], []) target/source pair when no
        Tool could supply a Builder for this name.
        """
        builder = self.get_builder(env)
        if builder is None:
            return [], []
        return builder(*args, **kw)
class ToolInitializer(object):
    """
    A class for delayed initialization of Tools modules.

    Instances of this class associate a list of Tool modules with
    a list of Builder method names that will be added by those Tool
    modules.  As part of instantiating this object for a particular
    construction environment, we also add the appropriate
    ToolInitializerMethod objects for the various Builder methods
    that we want to use to delay Tool searches until necessary.
    """
    def __init__(self, env, tools, names):
        # Accept a single tool/name as well as a list of either.
        if not SCons.Util.is_List(tools):
            tools = [tools]
        if not SCons.Util.is_List(names):
            names = [names]
        self.env = env
        self.tools = tools
        self.names = names
        self.methods = {}
        # Install one placeholder method per Builder name; each defers
        # the Tool search to apply_tools() on first call.
        for name in names:
            method = ToolInitializerMethod(name, self)
            self.methods[name] = method
            env.AddMethod(method)

    def remove_methods(self, env):
        """
        Removes the methods that were added by the tool initialization
        so we no longer copy and re-bind them when the construction
        environment gets cloned.
        """
        for method in self.methods.values():
            env.RemoveMethod(method)

    def apply_tools(self, env):
        """
        Searches the list of associated Tool modules for one that
        exists, and applies that to the construction environment.
        """
        for t in self.tools:
            tool = SCons.Tool.Tool(t)
            if tool.exists(env):
                env.Tool(tool)
                return

        # If we fall through here, there was no tool module found.
        # This is where we can put an informative error message
        # about the inability to find the tool.   We'll start doing
        # this as we cut over more pre-defined Builder+Tools to use
        # the ToolInitializer class.
def Initializers(env):
    """Attach deferred-initialization Install/InstallAs methods to *env*.

    The real implementations are supplied lazily by the 'install' tool;
    the wrappers below forward to the placeholder methods registered by
    the ToolInitializer.
    """
    ToolInitializer(env, ['install'], ['_InternalInstall', '_InternalInstallAs'])

    def Install(self, *args, **kw):
        return self._InternalInstall(*args, **kw)
    env.AddMethod(Install)

    def InstallAs(self, *args, **kw):
        return self._InternalInstallAs(*args, **kw)
    env.AddMethod(InstallAs)
def FindTool(tools, env):
    """Return the first name in *tools* whose Tool module exists for
    *env*, or None when none is available.
    """
    for name in tools:
        if Tool(name).exists(env):
            return name
    return None
def FindAllTools(tools, env):
    """Return the subset of *tools* whose Tool modules exist for *env*,
    preserving the original order.
    """
    return [tool for tool in tools if Tool(tool).exists(env)]
def tool_list(platform, env):
    """Return the default list of tool names to initialize for *platform*.

    Picks one linker, C compiler, C++ compiler, assembler, Fortran
    compiler and archiver -- preferring the platform's native toolchain --
    then appends every other available auxiliary tool and filters out
    anything not detected on this system.
    """
    other_plat_tools=[]
    # XXX this logic about what tool to prefer on which platform
    #     should be moved into either the platform files or
    #     the tool files themselves.
    # The search orders here are described in the man page.  If you
    # change these search orders, update the man page as well.
    if str(platform) == 'win32':
        "prefer Microsoft tools on Windows"
        linkers = ['mslink', 'gnulink', 'ilink', 'linkloc', 'ilink32' ]
        c_compilers = ['msvc', 'mingw', 'gcc', 'intelc', 'icl', 'icc', 'cc', 'bcc32' ]
        cxx_compilers = ['msvc', 'intelc', 'icc', 'g++', 'c++', 'bcc32' ]
        assemblers = ['masm', 'nasm', 'gas', '386asm' ]
        fortran_compilers = ['gfortran', 'g77', 'ifl', 'cvf', 'f95', 'f90', 'fortran']
        ars = ['mslib', 'ar', 'tlib']
        other_plat_tools=['msvs','midl']
    elif str(platform) == 'os2':
        "prefer IBM tools on OS/2"
        linkers = ['ilink', 'gnulink', ]#'mslink']
        c_compilers = ['icc', 'gcc',]# 'msvc', 'cc']
        cxx_compilers = ['icc', 'g++',]# 'msvc', 'c++']
        assemblers = ['nasm',]# 'masm', 'gas']
        fortran_compilers = ['ifl', 'g77']
        ars = ['ar',]# 'mslib']
    elif str(platform) == 'irix':
        "prefer MIPSPro on IRIX"
        linkers = ['sgilink', 'gnulink']
        c_compilers = ['sgicc', 'gcc', 'cc']
        cxx_compilers = ['sgic++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
        ars = ['sgiar']
    elif str(platform) == 'sunos':
        "prefer Forte tools on SunOS"
        linkers = ['sunlink', 'gnulink']
        c_compilers = ['suncc', 'gcc', 'cc']
        cxx_compilers = ['sunc++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['sunf95', 'sunf90', 'sunf77', 'f95', 'f90', 'f77',
                             'gfortran', 'g77', 'fortran']
        ars = ['sunar']
    elif str(platform) == 'hpux':
        "prefer aCC tools on HP-UX"
        linkers = ['hplink', 'gnulink']
        c_compilers = ['hpcc', 'gcc', 'cc']
        cxx_compilers = ['hpc++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
        ars = ['ar']
    elif str(platform) == 'aix':
        "prefer AIX Visual Age tools on AIX"
        linkers = ['aixlink', 'gnulink']
        c_compilers = ['aixcc', 'gcc', 'cc']
        cxx_compilers = ['aixc++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f95', 'f90', 'aixf77', 'g77', 'fortran']
        ars = ['ar']
    elif str(platform) == 'darwin':
        "prefer GNU tools on Mac OS X, except for some linkers and IBM tools"
        linkers = ['applelink', 'gnulink']
        c_compilers = ['gcc', 'cc']
        cxx_compilers = ['g++', 'c++']
        assemblers = ['as']
        fortran_compilers = ['gfortran', 'f95', 'f90', 'g77']
        ars = ['ar']
    else:
        "prefer GNU tools on all other platforms"
        linkers = ['gnulink', 'mslink', 'ilink']
        c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
        cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
        assemblers = ['gas', 'nasm', 'masm']
        fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
        ars = ['ar', 'mslib']

    c_compiler = FindTool(c_compilers, env) or c_compilers[0]

    # XXX this logic about what tool provides what should somehow be
    #     moved into the tool files themselves.
    if c_compiler and c_compiler == 'mingw':
        # MinGW contains a linker, C compiler, C++ compiler,
        # Fortran compiler, archiver and assembler:
        cxx_compiler = None
        linker = None
        assembler = None
        fortran_compiler = None
        ar = None
    else:
        # Don't use g++ if the C compiler has built-in C++ support:
        if c_compiler in ('msvc', 'intelc', 'icc'):
            cxx_compiler = None
        else:
            cxx_compiler = FindTool(cxx_compilers, env) or cxx_compilers[0]
        linker = FindTool(linkers, env) or linkers[0]
        assembler = FindTool(assemblers, env) or assemblers[0]
        fortran_compiler = FindTool(fortran_compilers, env) or fortran_compilers[0]
        ar = FindTool(ars, env) or ars[0]

    # Auxiliary tools are included only when actually available.
    other_tools = FindAllTools(other_plat_tools + [
                               'dmd',
                               #TODO: merge 'install' into 'filesystem' and
                               # make 'filesystem' the default
                               'filesystem',
                               'm4',
                               'wix', #'midl', 'msvs',
                               # Parser generators
                               'lex', 'yacc',
                               # Foreign function interface
                               'rpcgen', 'swig',
                               # Java
                               'jar', 'javac', 'javah', 'rmic',
                               # TeX
                               'dvipdf', 'dvips', 'gs',
                               'tex', 'latex', 'pdflatex', 'pdftex',
                               # Archivers
                               'tar', 'zip', 'rpm',
                               # SourceCode factories
                               'BitKeeper', 'CVS', 'Perforce',
                               'RCS', 'SCCS', # 'Subversion',
                               ], env)

    # Filter out the None entries produced by the mingw/built-in-C++ cases.
    tools = ([linker, c_compiler, cxx_compiler,
              fortran_compiler, assembler, ar]
             + other_tools)

    return [x for x in tools if x]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "ce48db3e62774e650d6075fcbcce5661",
"timestamp": "",
"source": "github",
"line_count": 681,
"max_line_length": 107,
"avg_line_length": 39.42290748898679,
"alnum_prop": 0.5331321935411778,
"repo_name": "angad/libjingle-mac",
"id": "5bee64dd9a5346c0a534aefa21e7c78630aff130",
"size": "26847",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "scons-2.2.0/engine/SCons/Tool/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2015946"
},
{
"name": "C++",
"bytes": "9306077"
},
{
"name": "Objective-C",
"bytes": "28091"
},
{
"name": "Perl",
"bytes": "50523"
},
{
"name": "Python",
"bytes": "4283804"
},
{
"name": "Shell",
"bytes": "1445083"
}
],
"symlink_target": ""
} |
from . import ir_report
| {
"content_hash": "308338efd503580dd5377fa4e212f2d4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.75,
"repo_name": "vileopratama/vitech",
"id": "521aa82b53c9240d1d25080498ac87e2a297dabf",
"size": "170",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "src/addons/report_xlsx/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TestCss(models.Model):
    """Example model storing a CSS snippet under a title."""

    # Human-readable name; also used as the str() form below.
    title = models.CharField(max_length=255)
    # Raw CSS source text.
    css = models.TextField()

    def __str__(self):
        return self.title
@python_2_unicode_compatible
class TestHTML(models.Model):
    """Example model storing an HTML snippet under a title."""

    # Human-readable name; also used as the str() form below.
    title = models.CharField(max_length=255)
    # Raw HTML source text.
    html = models.TextField()

    def __str__(self):
        return self.title
| {
"content_hash": "19a23b36abe701eada8b42badc3598a9",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 61,
"avg_line_length": 23.05,
"alnum_prop": 0.7006507592190889,
"repo_name": "sk1p/django-codemirror2",
"id": "e693b6668102bdbbb5a49c640fdf5fab9bd34dea",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/testapp/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "190"
},
{
"name": "Python",
"bytes": "6561"
},
{
"name": "Shell",
"bytes": "196"
}
],
"symlink_target": ""
} |
"""The Python 2 and 3 compatible type definitions."""
import sys
# pylint: disable=invalid-name,undefined-variable
if sys.version_info[0] >= 3:
    # Python 3: bytes and text are distinct types; there is a single int.
    BYTES_TYPE = bytes
    INTEGER_TYPES = (int, )
    STRING_TYPES = (str, )
    UNICODE_TYPE = str
else:
    # Python 2 names (``long``, ``basestring``, ``unicode``) only exist
    # on that interpreter; this branch is never compiled-in on Python 3.
    BYTES_TYPE = str
    INTEGER_TYPES = (int, long)
    STRING_TYPES = (basestring, )
    UNICODE_TYPE = unicode
| {
"content_hash": "b5286bacd8e70e7e0ac51d061b134853",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 53,
"avg_line_length": 20.705882352941178,
"alnum_prop": 0.6761363636363636,
"repo_name": "rgayon/l2tdevtools",
"id": "87fbeefe0edad6af13c07c736bcb1117a47c0757",
"size": "376",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "l2tdevtools/py2to3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1333"
},
{
"name": "PowerShell",
"bytes": "1635"
},
{
"name": "Python",
"bytes": "519038"
},
{
"name": "Shell",
"bytes": "14276"
}
],
"symlink_target": ""
} |
__author__ = 'buec'
from abc import ABCMeta
from abc import abstractmethod
class BaseParser(metaclass=ABCMeta):
    """Abstract base for parsers converting between strings and models.

    Subclasses implement :meth:`parse_string` and :meth:`write_string`;
    the file-based helpers below are provided for free.
    """

    @abstractmethod
    def parse_string(self, data):
        """Parse *data* (a string) and return the resulting model."""
        pass

    def parse_from_file(self, file_path):
        """Read *file_path* and return the parsed model.

        The context manager guarantees the file is closed even if
        reading raises (the previous open/read/close sequence leaked
        the handle on error).
        """
        with open(file_path, 'r') as f:
            content = f.read()
        return self.parse_string(content)

    @abstractmethod
    def write_string(self, model):
        """Serialize *model* to a string and return it."""
        pass

    def write_to_file(self, model, file_path):
        """Serialize *model* and write the result to *file_path*."""
        with open(file_path, 'w') as f:
            f.write(self.write_string(model))
"content_hash": "00b8d4421d2c683cb08842fd79c96a72",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 46,
"avg_line_length": 20.59259259259259,
"alnum_prop": 0.6007194244604317,
"repo_name": "ynop/pyspeechgrammar",
"id": "45a05e2923db7fd8eb2f790312668efd2102a5ee",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspeechgrammar/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58213"
}
],
"symlink_target": ""
} |
import json
from flask import session, Blueprint, jsonify
from lexos.helpers import constants as constants
from lexos.managers import session_manager as session_manager
from lexos.models.statistics_model import StatsModel
from lexos.views.base import render
statistics_blueprint = Blueprint("statistics", __name__)
@statistics_blueprint.route("/statistics", methods=["GET"])
def statistics() -> str:
""" Gets the statistics page.
:return: The statistics page.
"""
# Set the default options
if "analyoption" not in session:
session["analyoption"] = constants.DEFAULT_ANALYZE_OPTIONS
# Return the statistics page
return render("statistics.html")
@statistics_blueprint.route("/statistics/corpus", methods=["POST"])
def corpus() -> str:
""" Gets the corpus statistics.
:return: The corpus statistics.
"""
# Cache the options
session_manager.cache_analysis_option()
# Return the statistics
file_result = StatsModel().get_corpus_stats()
return json.dumps({
"unit": file_result.unit,
"average": file_result.mean,
"standard_deviation": file_result.std_deviation,
"interquartile_range": file_result.inter_quartile_range,
"standard_error_small": file_result.anomaly_se.small_items,
"standard_error_large": file_result.anomaly_se.large_items,
"interquartile_range_small": file_result.anomaly_iqr.small_items,
"interquartile_range_large": file_result.anomaly_iqr.large_items
})
@statistics_blueprint.route("/statistics/document-statistics",
methods=["POST"])
def documents() -> str:
"""Get the statistics of the individual documents.
:return: The statistics of the individual documents.
"""
session_manager.cache_analysis_option()
return jsonify(StatsModel().get_document_statistics())
@statistics_blueprint.route("/statistics/box-plot", methods=["POST"])
def box_plot() -> str:
""" Get a Plotly box plot of the document sizes.
:return: The Plotly box plot of the document sizes.
"""
session_manager.cache_analysis_option()
return StatsModel().get_box_plot()
| {
"content_hash": "24815b7fe368099cbdc0cefee23af852",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 73,
"avg_line_length": 31.434782608695652,
"alnum_prop": 0.6892577224527432,
"repo_name": "WheatonCS/Lexos",
"id": "dcd33b3028b427d0e57455e868e74d814b146bef",
"size": "2169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lexos/views/statistics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50151"
},
{
"name": "HTML",
"bytes": "204190"
},
{
"name": "JavaScript",
"bytes": "204167"
},
{
"name": "Python",
"bytes": "855625"
}
],
"symlink_target": ""
} |
import scrapy
class TemplateprojectItem(scrapy.Item):
    """Placeholder scraped-item class; no fields are defined yet."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| {
"content_hash": "b6b5e32fc8394e559889de3e8c217ac6",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 20.285714285714285,
"alnum_prop": 0.6971830985915493,
"repo_name": "Ertinfagor/ScrapySplash",
"id": "d6168d8d84acf0def3286a8187637e3ac9b3e4ce",
"size": "294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templateproject/templateproject/items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7135"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
from django.conf.urls import url
from django.db import connection, connections, transaction
from django.http import Http404
from django.test import TestCase, TransactionTestCase, override_settings
from django.utils.decorators import method_decorator
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
from rest_framework.views import APIView
from tests.models import BasicModel
factory = APIRequestFactory()
class BasicView(APIView):
    """View that performs one DB insert and returns a 200 response."""

    def post(self, request, *args, **kwargs):
        BasicModel.objects.create()
        # NOTE: the payload says 'GET' although this is the POST handler;
        # the tests below only check the status code, so it is kept as-is.
        return Response({'method': 'GET'})
class ErrorView(APIView):
    """View that inserts a row and then raises a plain ``Exception``."""

    def post(self, request, *args, **kwargs):
        BasicModel.objects.create()
        # A generic exception escapes DRF's exception handler.
        raise Exception
class APIExceptionView(APIView):
    """View that inserts a row and then raises a DRF ``APIException``."""

    def post(self, request, *args, **kwargs):
        BasicModel.objects.create()
        # APIException is handled by DRF and produces a 500 response.
        raise APIException
class NonAtomicAPIExceptionView(APIView):
    """View whose dispatch is exempted from ``ATOMIC_REQUESTS``."""

    @method_decorator(transaction.non_atomic_requests)
    def dispatch(self, *args, **kwargs):
        return super(NonAtomicAPIExceptionView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        # Touch the DB, then raise Http404 (expected to yield 404, not 500).
        BasicModel.objects.all()
        raise Http404
# URL conf activated via @override_settings(ROOT_URLCONF=...) in the
# non-atomic test case below.
urlpatterns = (
    url(r'^$', NonAtomicAPIExceptionView.as_view()),
)
@unittest.skipUnless(
    connection.features.uses_savepoints,
    "'atomic' requires transactions and savepoints."
)
class DBTransactionTests(TestCase):
    """With ATOMIC_REQUESTS enabled, a successful view commits its insert."""

    def setUp(self):
        self.view = BasicView.as_view()
        # Enable per-request transactions for the duration of each test.
        connections.databases['default']['ATOMIC_REQUESTS'] = True

    def tearDown(self):
        connections.databases['default']['ATOMIC_REQUESTS'] = False

    def test_no_exception_commit_transaction(self):
        request = factory.post('/')

        # Exactly one query: the insert performed by the view.
        with self.assertNumQueries(1):
            response = self.view(request)
        assert not transaction.get_rollback()
        assert response.status_code == status.HTTP_200_OK
        assert BasicModel.objects.count() == 1
@unittest.skipUnless(
    connection.features.uses_savepoints,
    "'atomic' requires transactions and savepoints."
)
class DBTransactionErrorTests(TestCase):
    """A plain Exception leaves transaction management to Django."""

    def setUp(self):
        self.view = ErrorView.as_view()
        connections.databases['default']['ATOMIC_REQUESTS'] = True

    def tearDown(self):
        connections.databases['default']['ATOMIC_REQUESTS'] = False

    def test_generic_exception_delegate_transaction_management(self):
        """
        Transaction is eventually managed by outer-most transaction atomic
        block. DRF do not try to interfere here.

        We let django deal with the transaction when it will catch the
        Exception.
        """
        request = factory.post('/')
        with self.assertNumQueries(3):
            # 1 - begin savepoint
            # 2 - insert
            # 3 - release savepoint
            with transaction.atomic():
                self.assertRaises(Exception, self.view, request)
                # No rollback flag: the insert survives.
                assert not transaction.get_rollback()
        assert BasicModel.objects.count() == 1
@unittest.skipUnless(
    connection.features.uses_savepoints,
    "'atomic' requires transactions and savepoints."
)
class DBTransactionAPIExceptionTests(TestCase):
    """An APIException makes DRF mark the transaction for rollback."""

    def setUp(self):
        self.view = APIExceptionView.as_view()
        connections.databases['default']['ATOMIC_REQUESTS'] = True

    def tearDown(self):
        connections.databases['default']['ATOMIC_REQUESTS'] = False

    def test_api_exception_rollback_transaction(self):
        """
        Transaction is rollbacked by our transaction atomic block.
        """
        request = factory.post('/')
        # The release-savepoint query only happens on backends that
        # support releasing savepoints.
        num_queries = 4 if connection.features.can_release_savepoints else 3
        with self.assertNumQueries(num_queries):
            # 1 - begin savepoint
            # 2 - insert
            # 3 - rollback savepoint
            # 4 - release savepoint
            with transaction.atomic():
                response = self.view(request)
                assert transaction.get_rollback()
        assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
        assert BasicModel.objects.count() == 0
@unittest.skipUnless(
    connection.features.uses_savepoints,
    "'atomic' requires transactions and savepoints."
)
@override_settings(ROOT_URLCONF='tests.test_atomic_requests')
class NonAtomicDBTransactionAPIExceptionTests(TransactionTestCase):
    """A non_atomic_requests view must not trigger rollback handling."""

    def setUp(self):
        connections.databases['default']['ATOMIC_REQUESTS'] = True

    def tearDown(self):
        connections.databases['default']['ATOMIC_REQUESTS'] = False

    def test_api_exception_rollback_transaction_non_atomic_view(self):
        response = self.client.get('/')

        # without checking connection.in_atomic_block view raises 500
        # due attempt to rollback without transaction
        assert response.status_code == status.HTTP_404_NOT_FOUND
| {
"content_hash": "9f24c25861765134223fb87756cabe9a",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 81,
"avg_line_length": 32.73856209150327,
"alnum_prop": 0.6813735276502296,
"repo_name": "kgeorgy/django-rest-framework",
"id": "697c549dea0aadab3d7416ff59b07d4a2b84d661",
"size": "5009",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_atomic_requests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "39386"
},
{
"name": "HTML",
"bytes": "84905"
},
{
"name": "JavaScript",
"bytes": "18201"
},
{
"name": "Python",
"bytes": "1218922"
}
],
"symlink_target": ""
} |
import json
import os
# Experiment settings
class XPParams:
    """Global experiment flags, stored as shared class-level state."""

    debug = False
    realExper = False
    useCrossValidation = False
    currentIteration = 0
    baseline = False
    useAutoGeneratedPOS = True
    useAutoGeneratedDEP = True
    useUniversalPOSTag = True
# Corpus and configuration paths
class Paths:
    """Filesystem locations derived from this module's own path."""

    # Project root: this module's directory with its final path
    # component name sliced off the end.
    projectPath = os.path.dirname(__file__)[:-len(os.path.basename(os.path.dirname(__file__)))]
    configsFolder = os.path.join(projectPath, 'Experiments/xp')
    corporaPath = os.path.join(projectPath, "sharedtask/")
# Feature extraction configurations
class FeatParams:
    """Feature-extraction switches, loaded from a JSON config file.

    All flags live on the class itself (not on instances), so
    constructing ``FeatParams(path)`` reconfigures every consumer.
    """

    usePreciseDictionary = False
    # Feature extraction parameters (defaults; overridden by __init__).
    useFirstBufferElement = True
    useSecondBufferElement = True
    useToken = True
    useLemma = True
    usePOS = True
    useBiGram = True
    useTriGram = True
    useDistance = True
    useSyntax = True
    generateS0B2Bigram = True
    useDictionary = True
    historyLength1 = True
    historyLength2 = True
    historyLength3 = True
    useS0B0Distance = True
    useS0S1Distance = True
    useStackLength = True
    enhanceMerge = False
    enableSingleMWE = False
    useLexic = False
    smartMWTDetection = True
    generateS1B1 = False

    def __init__(self, filePath):
        """Load *filePath* (JSON) and overwrite the class-level flags.

        Raises KeyError if a mandatory config key is missing.
        """
        with open(filePath, 'r') as configFile:
            config = json.load(configFile)
        # Experiment name = config file name up to its first dot.
        # (os.path.basename is portable, unlike splitting on '/'; the
        # previous "len(split('/')) > 0" guard was always true.)
        Paths.xpName = os.path.basename(filePath).split('.')[0]
        # Optional keys: only override the defaults when present.
        if "generateS1B1" in config:
            FeatParams.generateS1B1 = config["generateS1B1"]
        if "enhanceMerge" in config:
            FeatParams.enhanceMerge = config["enhanceMerge"]
        if "usePreciseDictionary" in config:
            FeatParams.usePreciseDictionary = config["usePreciseDictionary"]
        if "useLexic" in config:
            FeatParams.useLexic = config["useLexic"]
        if "enableSingleMWE" in config:
            FeatParams.enableSingleMWE = config["enableSingleMWE"]
        # Mandatory keys: a missing one raises KeyError, as before.
        FeatParams.useFirstBufferElement = config["useFirstBufferElement"]
        FeatParams.useSecondBufferElement = config["useSecondBufferElement"]
        FeatParams.usePOS = config["UseLinguistInfo"]["usePOS"]
        FeatParams.useLemma = config["UseLinguistInfo"]["useLemma"]
        FeatParams.useBiGram = config["UseLinguistInfo"]["useBiGram"]
        FeatParams.useTriGram = config["UseLinguistInfo"]["useTriGram"]
        FeatParams.useS0B0Distance = config["S0B0Distance"]
        FeatParams.useS0S1Distance = config["S0S1Distance"]
        FeatParams.useStackLength = config["useStackLength"]
        # NOTE: the config key is spelled "useSytax" (sic); kept for
        # compatibility with existing config files.
        FeatParams.useSyntax = config["UseLinguistInfo"]["useSytax"]
        FeatParams.generateS0B2Bigram = config["generateS0B2Bigram"]
        FeatParams.useDictionary = config["useDictionary"]
        FeatParams.historyLength1 = config["useTransitionHistory"]["transitionHistoryLength1"]
        FeatParams.historyLength2 = config["useTransitionHistory"]["transitionHistoryLength2"]
        FeatParams.historyLength3 = config["useTransitionHistory"]["transitionHistoryLength3"]
| {
"content_hash": "a413dd8d4570e782e5d8dbd9e0973749",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 98,
"avg_line_length": 32.898989898989896,
"alnum_prop": 0.6622658888547743,
"repo_name": "hazemalsaied/IdenSys",
"id": "d87434510a6f620002dcc860b27da58d0107ddb8",
"size": "3257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Src/param.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108071"
},
{
"name": "Smalltalk",
"bytes": "10294539"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
# Python 2.5 doesn't have the json module.
try:
import json
except ImportError:
import simplejson as json
import msparser
import os
import os.path
import sys
# Use unittest2 on versions older than Python 2.7.
if sys.version_info[0] < 3 and sys.version_info[1] < 7:
from unittest2 import TestCase, main
else:
from unittest import TestCase, main
class FakeContext():
    """In-memory stand-in for msparser's file context used by the tests.

    Serves preloaded lines through readline() and tracks the current
    position relative to a configurable baseline.
    """

    def __init__(self, lines=None, filename="fake.txt", baseline=0):
        # The previous version used the mutable default ``lines=[]``,
        # which is shared across all calls; a None sentinel avoids that.
        self.index_ = 0
        self.lines_ = [] if lines is None else lines
        self.filename_ = filename
        self.baseline_ = baseline

    def set_content(self, lines, baseline=0):
        """Replace the served lines and reset the read position."""
        self.index_ = 0
        self.lines_ = lines
        self.baseline_ = baseline

    def line(self):
        """Return baseline + number of lines read so far."""
        return self.baseline_ + self.index_

    def readline(self):
        """Return the next line with a trailing newline, or '' at EOF."""
        if len(self.lines_) > self.index_:
            line = self.lines_[self.index_] + "\n"
            self.index_ += 1
            return line
        return ""

    def filename(self):
        """Return the fake file name."""
        return self.filename_
class ParseHeaderTest(TestCase):
    """Tests for msparser._parse_header (desc/cmd/time_unit lines)."""

    def setUp(self):
        self.ctx = FakeContext()
        self.mdata = {}

    def parse_header(self, lines, baseline=0):
        # Helper: feed ``lines`` to the parser, filling self.mdata.
        self.ctx.set_content(lines, baseline)
        msparser._parse_header(self.ctx, self.mdata)

    def test_parse_header(self):
        self.parse_header([
            "desc: --time-unit=B --pages-as-heap=yes",
            "cmd: ./thompson --log=2 --fastpath",
            "time_unit: B"
        ])
        self.assertEqual(self.mdata["desc"],
                         "--time-unit=B --pages-as-heap=yes")
        self.assertEqual(self.mdata["cmd"], "./thompson --log=2 --fastpath")
        self.assertEqual(self.mdata["time_unit"], "B")

    def test_parse_malformed_header(self):
        # "snapshot=1" is not a valid header line: expect a ParseError
        # pointing at line 3 of the fake file.
        lines = [
            "desc: --time-unit=B --pages-as-heap=yes",
            "cmd: ./thompson --log=2 --fastpath",
            "snapshot=1"
        ]
        try:
            self.parse_header(lines, 0)
            self.fail("ParseError should have been thrown.")
        except msparser.ParseError:
            # Capture the exception in a Python 2.5/3.x compatible way.
            # See Python porting guide for details: http://goo.gl/yho0u
            err = sys.exc_info()[1]
            self.assertEqual(err.line, 3)
            self.assertEqual(err.filename, self.ctx.filename())
class ParseHeapTreeTest(TestCase):
    """Tests for msparser._parse_heap_tree ("nX: <bytes> ..." records)."""

    def parse_heap_tree(self, lines):
        # Helper: parse ``lines`` through a fresh FakeContext.
        self.ctx = FakeContext()
        self.ctx.set_content(lines)
        return msparser._parse_heap_tree(self.ctx)

    def test_parse_one_level_simple(self):
        # Leaf node with full "address: function (file:line)" details.
        tree = self.parse_heap_tree([
            "n0: 50456 0x804BFC0: DancingLinksSolver::build_cover_matrix("
            "Sudoku&) (SudokuSolver.cpp:30)"
        ])
        self.assertEqual(tree["nbytes"], 50456)
        self.assertEqual(len(tree["children"]), 0)
        self.assertEqual(tree["details"], {
            "function": "DancingLinksSolver::build_cover_matrix(Sudoku&)",
            "address": "0x804BFC0",
            "file": "SudokuSolver.cpp",
            "line": 30
        })

    def test_parse_one_level_empty_filename(self):
        # "???" frames have no file/line information.
        tree = self.parse_heap_tree(["n0: 24305664 0x7FF0007A5: ???"])
        self.assertEqual(tree["nbytes"], 24305664)
        self.assertEqual(len(tree["children"]), 0)
        self.assertEqual(tree["details"], {
            "function": "???",
            "address": "0x7FF0007A5",
            "file": None,
            "line": None
        })

    def test_parse_one_level_page_allocation(self):
        # The synthetic "(page allocation syscalls)" root has no details.
        tree = self.parse_heap_tree([
            "n0: 165990400 (page allocation syscalls) mmap/mremap/brk, "
            "--alloc-fns, etc."
        ])
        self.assertEqual(tree["nbytes"], 165990400)
        self.assertEqual(len(tree["children"]), 0)
        self.assertEqual(tree["details"], None)

    def test_parse_one_level_below_threshold(self):
        # Aggregated "below massif's threshold" entries also lack details.
        tree = self.parse_heap_tree([
            "n0: 8192 in 1 place, below massif's threshold (01.00%)"
        ])
        self.assertEqual(tree["nbytes"], 8192)
        self.assertEqual(len(tree["children"]), 0)
        self.assertEqual(tree["details"], None)

    def test_parse_multi_levels(self):
        # Leading spaces encode nesting depth; "n2" announces 2 children.
        tree = self.parse_heap_tree([
            "n2: 165990400 (page allocation syscalls) mmap/mremap/brk, "
            "--alloc-fns, etc.",
            " n2: 111468544 0x5E70169: mmap (syscall-template.S:82)",
            "  n0: 83079168 0x5E031E7: malloc (arena.c:824)",
            "  n0: 8192 in 1 place, below massif's threshold (01.00%)",
            " n0: 83079168 0x5DFE377: new_heap (arena.c:554)"
        ])
        self.assertEqual(tree["nbytes"], 165990400)
        self.assertEqual(len(tree["children"]), 2)
        self.assertEqual(tree["details"], None)

        child1 = tree["children"][0]
        self.assertEqual(child1["nbytes"], 111468544)
        self.assertEqual(len(child1["children"]), 2)
        self.assertEqual(child1["details"], {
            "function": "mmap",
            "address": "0x5E70169",
            "file": "syscall-template.S",
            "line": 82
        })

        child11 = child1["children"][0]
        self.assertEqual(child11["nbytes"], 83079168)
        self.assertEqual(len(child11["children"]), 0)
        self.assertEqual(child11["details"], {
            "function": "malloc",
            "address": "0x5E031E7",
            "file": "arena.c",
            "line": 824
        })

        child12 = child1["children"][1]
        self.assertEqual(child12["nbytes"], 8192)
        self.assertEqual(len(child12["children"]), 0)
        self.assertEqual(child12["details"], None)

        child2 = tree["children"][1]
        self.assertEqual(child2["nbytes"], 83079168)
        self.assertEqual(len(child2["children"]), 0)
        self.assertEqual(child2["details"], {
            "function": "new_heap",
            "address": "0x5DFE377",
            "file": "arena.c",
            "line": 554
        })
class ParseSnapshotTest(TestCase):
    """Tests for msparser._parse_snapshots ("snapshot=N" sections)."""

    def setUp(self):
        self.ctx = FakeContext()
        self.mdata = {
            "snapshots": [],
            "detailed_snapshot_indices": []
        }

    def parse_snapshot(self, lines, baseline=0):
        # Helper: feed ``lines`` to the parser, filling self.mdata.
        self.ctx.set_content(lines, baseline)
        msparser._parse_snapshots(self.ctx, self.mdata)

    def test_parse_empty_snapshot(self):
        # heap_tree=empty: recorded, but not a "detailed" snapshot.
        self.parse_snapshot([
            "#-----------",
            "snapshot=1",
            "#-----------",
            "time=100279",
            "mem_heap_B=30035968",
            "mem_heap_extra_B=0",
            "mem_stacks_B=0",
            "heap_tree=empty"
        ])
        self.assertEqual(len(self.mdata["detailed_snapshot_indices"]), 0)
        self.assertEqual(self.mdata["snapshots"][0], {
            "id": 1,
            "time": 100279,
            "mem_heap": 30035968,
            "mem_heap_extra": 0,
            "mem_stack": 0,
            "heap_tree": None
        })

    def test_parse_peak_snapshot(self):
        # heap_tree=peak: marks the peak index and counts as detailed.
        self.parse_snapshot([
            "#-----------",
            "snapshot=1",
            "#-----------",
            "time=100279",
            "mem_heap_B=30035968",
            "mem_heap_extra_B=0",
            "mem_stacks_B=0",
            "heap_tree=peak",
            "n0: 8192 in 1 place, below massif's threshold (01.00%)"
        ])
        self.assertEqual(self.mdata["peak_snapshot_index"], 0)
        self.assertEqual(self.mdata["detailed_snapshot_indices"][0], 0)
        self.assertEqual(len(self.mdata["detailed_snapshot_indices"]), 1)
        self.assertIsNotNone(self.mdata["snapshots"][0]["heap_tree"])

    def test_parse_malformed_snapshot(self):
        # "foo=..." is invalid; the reported line honours the baseline.
        lines = [
            "#-----------",
            "snapshot=1",
            "#-----------",
            "time=100279",
            "foo=30035968",
            "mem_heap_extra_B=0"
        ]
        try:
            self.parse_snapshot(lines, 345)
            self.fail("ParseError should have been thrown.")
        except msparser.ParseError:
            # Capture the exception in a Python 2.5/3.x compatible way.
            # See Python porting guide for details: http://goo.gl/yho0u
            err = sys.exc_info()[1]
            self.assertEqual(err.line, 350)
            self.assertEqual(err.filename, self.ctx.filename())
class TestFullParse(TestCase):
    """Populated dynamically below with one test per file in test_data/."""
    pass
def make_parse_test(path_to_actual, path_to_expected):
    """Return a TestCase method asserting that parsing ``path_to_actual``
    yields exactly the JSON stored in ``path_to_expected``."""
    def test_parse(self):
        parsed = msparser.parse_file(path_to_actual)
        with open(path_to_expected) as expected_file:
            reference = json.load(expected_file)
        self.assertEqual(reference, parsed)
    return test_parse
for filename in os.listdir("test_data"):
if not filename.endswith("json"):
path_to_actual = os.path.join("test_data", filename)
path_to_expected = path_to_actual + ".json"
test_name = "test" + filename.replace(".", "_")
test_function = make_parse_test(path_to_actual, path_to_expected)
test_function.__doc__ = test_name
setattr(TestFullParse, test_name, test_function)
if __name__ == "__main__":
main()
| {
"content_hash": "bae6a0db2fa159389570c66d5f8f2cdd",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 76,
"avg_line_length": 32.67259786476868,
"alnum_prop": 0.5520095850125258,
"repo_name": "wdv4758h/msparser",
"id": "0496b27313e59c212bbcb29f7d5d0e820bf6761e",
"size": "9331",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "msparser_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "109327"
},
{
"name": "Makefile",
"bytes": "250"
},
{
"name": "Python",
"bytes": "27720"
}
],
"symlink_target": ""
} |
import functools
import logging
import subprocess
import sys

import workflow

import pepper
import pepper.cli
logger = logging.getLogger(__name__)
def run_alfred(query):
    """Call Alfred with ``query``"""
    script = 'tell application "Alfred 2" to search "{}"'.format(query)
    subprocess.call(['osascript', '-e', script])
def redirect(args):
    """Re-open Alfred with the 'salt' keyword followed by *args*."""
    tail = ' '.join(args)
    run_alfred('salt ' + tail)
def salt(func):
    """Decorator: log in to the salt-api and inject an ``api`` kwarg.

    On login failure the error is shown as an Alfred item instead of
    calling the wrapped function.
    """
    # functools.wraps preserves the wrapped function's name/docstring.
    @functools.wraps(func)
    def wrapper(wf, *args, **kwargs):
        try:
            cli = pepper.cli.PepperCli()
            cli.parse()
            opts = cli.get_login_details()
            api = pepper.Pepper(opts['SALTAPI_URL'])
            api.login(opts['SALTAPI_USER'], opts['SALTAPI_PASS'], opts['SALTAPI_EAUTH'])
            kwargs['api'] = api
        # "except X as e" replaces the Python-2-only "except X, e" syntax
        # and works on both Python 2.6+ and Python 3.
        except pepper.PepperException as e:
            wf.add_item('Salt Exception', str(e))
            wf.send_feedback()
        else:
            return func(wf, *args, **kwargs)
    return wrapper
@salt
def jobs(wf, api):
    """List salt jobs as Alfred items (jid title, function subtitle)."""
    for result in api.runner('jobs.list_jobs').get('return', []):
        for jid, jout in result.iteritems():
            wf.logger.debug('%s', jout)
            # arg 'showjob<jid>' is passed back to the workflow on selection.
            wf.add_item(jid, jout['Function'], arg='showjob' + jid, valid=True)
    wf.send_feedback()
@salt
def ping(wf, api):
    """Run ``test.ping`` on all minions and show each raw result."""
    for result in api.local('*', 'test.ping'):
        wf.add_item(str(result))
    wf.send_feedback()
def main(wf):
    """Dispatch the first workflow argument to its handler."""
    if not wf.args:
        return None
    command = wf.args[0]
    if command == 'jobs':
        return jobs(wf)
    if command == 'ping':
        return ping(wf)
# Entry point: hand a Workflow object to main() and exit with its result.
if __name__ == '__main__':
    sys.exit(main(workflow.Workflow()))
| {
"content_hash": "c33ca535b14f334c40cb3f28ab1e58b2",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 88,
"avg_line_length": 24.4375,
"alnum_prop": 0.559462915601023,
"repo_name": "kfdm/alfred-salt",
"id": "6e564a8d84d3bceec66f3407ba586a8cb915eac1",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148705"
}
],
"symlink_target": ""
} |
"""Module with main classes related to Connections."""
import logging
from enum import Enum
from errno import EBADF, ENOTCONN
from socket import error as SocketError
from socket import SHUT_RDWR
__all__ = ('Connection', 'ConnectionProtocol', 'ConnectionState')
LOG = logging.getLogger(__name__)


class ConnectionState(Enum):
    """Enum of possible general connections states."""

    NEW = 0
    SETUP = 1
    ESTABLISHED = 2
    FAILED = 3
    FINISHED = 4


class ConnectionProtocol:
    """Class to hold simple protocol information for the connection."""

    def __init__(self, name=None, version=None, state=None):
        """Assign parameters to instance variables."""
        self.name = name
        self.version = version
        self.state = state


class Connection:
    """Connection class to abstract a network connection."""

    def __init__(self, address, port, socket, switch=None):
        """Assign parameters to instance variables.

        Args:
            address (|hw_address|): Source address.
            port (int): Port number.
            socket (socket): socket.
            switch (:class:`~.Switch`): switch with this connection.
        """
        self.address = address
        self.port = port
        self.socket = socket
        self.switch = switch
        self.state = ConnectionState.NEW
        self.protocol = ConnectionProtocol()
        self.remaining_data = b''

    def __str__(self):
        return f"Connection({self.address!r}, {self.port!r})"

    def __repr__(self):
        return f"Connection({self.address!r}, {self.port!r}," + \
               f" {self.socket!r}, {self.switch!r}, {self.state!r})"

    @property
    def state(self):
        """Return the state of the connection."""
        return self._state

    @state.setter
    def state(self, new_state):
        # Reject anything that is not a ConnectionState member; kept as a
        # plain Exception for backward compatibility with existing callers.
        if new_state not in ConnectionState:
            raise Exception('Unknown State', new_state)
        # pylint: disable=attribute-defined-outside-init
        self._state = new_state
        # pylint: enable=attribute-defined-outside-init
        LOG.debug('Connection %s changed state: %s',
                  self.id, self.state)

    @property
    def id(self):  # pylint: disable=invalid-name
        """Return id from Connection instance.

        Returns:
            string: Connection id.
        """
        return (self.address, self.port)

    def send(self, buffer):
        """Send a buffer message using the socket from the connection instance.

        Args:
            buffer (bytes): Message buffer that will be sent.
        """
        try:
            if self.is_alive():
                self.socket.sendall(buffer)
        except (OSError, SocketError) as exception:
            # Sending on a broken socket: log and tear the connection down.
            LOG.debug('Could not send packet. Exception: %s', exception)
            self.close()

    def close(self):
        """Close the socket from connection instance."""
        self.state = ConnectionState.FINISHED
        # Detach from the switch so nothing keeps using a dead connection.
        if self.switch and self.switch.connection is self:
            self.switch.connection = None
        LOG.debug('Shutting down Connection %s', self.id)
        try:
            self.socket.shutdown(SHUT_RDWR)
            self.socket.close()
            self.socket = None
            LOG.debug('Connection Closed: %s', self.id)
        except OSError as exception:
            # Already-disconnected/already-closed sockets are fine; anything
            # else is unexpected and must propagate (bare ``raise`` keeps
            # the original traceback).
            if exception.errno not in (ENOTCONN, EBADF):
                raise
        except AttributeError:
            # self.socket was already None.
            LOG.debug('Socket Already Closed: %s', self.id)

    def is_alive(self):
        """Return True if the connection socket is alive. False otherwise."""
        return self.socket is not None and self.state not in (
            ConnectionState.FINISHED, ConnectionState.FAILED)

    def is_new(self):
        """Return True if the connection is new. False otherwise."""
        return self.state == ConnectionState.NEW

    def is_established(self):
        """Return True if the connection is established. False otherwise."""
        return self.state == ConnectionState.ESTABLISHED

    def is_during_setup(self):
        """Return True if the connection is in setup state. False otherwise."""
        return self.state == ConnectionState.SETUP

    def set_established_state(self):
        """Set the connection state to Established."""
        self.state = ConnectionState.ESTABLISHED

    def set_setup_state(self):
        """Set the connection state to Setup."""
        self.state = ConnectionState.SETUP

    def update_switch(self, switch):
        """Update switch with this instance of Connection.

        Args:
            switch (:class:`~.Switch`): switch instance.
        """
        self.switch = switch
        self.switch.connection = self
| {
"content_hash": "d67438eef580e7c9201ab3dab8cee585",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 79,
"avg_line_length": 31.71812080536913,
"alnum_prop": 0.6100296233601354,
"repo_name": "macartur/kytos",
"id": "b05629290d24eaf65a61c42587f1f9b0be1b480b",
"size": "4726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kytos/core/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187067"
}
],
"symlink_target": ""
} |
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..utils._encode import _encode, _check_unknown, _unique
__all__ = [
'OneHotEncoder',
'OrdinalEncoder'
]
class _BaseEncoder(TransformerMixin, BaseEstimator):
"""
Base class for encoders that includes the code to categorize and
transform the input features.
"""
    def _check_X(self, X):
        """
        Perform custom check_array:
        - convert list of strings to object dtype
        - check for missing values for object dtype data (check_array does
          not do that)
        - return list of features (arrays): this list of features is
          constructed feature by feature to preserve the data types
          of pandas DataFrame columns, as otherwise information is lost
          and cannot be used, eg for the `categories_` attribute.

        Returns
        -------
        X_columns : list of per-feature arrays
        n_samples : int
        n_features : int
        """
        if not (hasattr(X, 'iloc') and getattr(X, 'ndim', 0) == 2):
            # if not a dataframe, do normal check_array validation
            X_temp = check_array(X, dtype=None)
            if (not hasattr(X, 'dtype')
                    and np.issubdtype(X_temp.dtype, np.str_)):
                # input validated to a numpy string dtype: re-validate as
                # object dtype instead (see docstring).
                X = check_array(X, dtype=object)
            else:
                X = X_temp
            needs_validation = False
        else:
            # pandas dataframe, do validation later column by column, in order
            # to keep the dtype information to be used in the encoder.
            needs_validation = True

        n_samples, n_features = X.shape
        X_columns = []

        # Validate feature by feature so each column keeps its own dtype.
        for i in range(n_features):
            Xi = self._get_feature(X, feature_idx=i)
            Xi = check_array(Xi, ensure_2d=False, dtype=None,
                             force_all_finite=needs_validation)
            X_columns.append(Xi)

        return X_columns, n_samples, n_features
def _get_feature(self, X, feature_idx):
if hasattr(X, 'iloc'):
# pandas dataframes
return X.iloc[:, feature_idx]
# numpy arrays, sparse arrays
return X[:, feature_idx]
def _fit(self, X, handle_unknown='error'):
X_list, n_samples, n_features = self._check_X(X)
if self.categories != 'auto':
if len(self.categories) != n_features:
raise ValueError("Shape mismatch: if categories is an array,"
" it has to be of shape (n_features,).")
self.categories_ = []
for i in range(n_features):
Xi = X_list[i]
if self.categories == 'auto':
cats = _unique(Xi)
else:
cats = np.array(self.categories[i], dtype=Xi.dtype)
if Xi.dtype != object:
if not np.all(np.sort(cats) == cats):
raise ValueError("Unsorted categories are not "
"supported for numerical categories")
if handle_unknown == 'error':
diff = _check_unknown(Xi, cats)
if diff:
msg = ("Found unknown categories {0} in column {1}"
" during fit".format(diff, i))
raise ValueError(msg)
self.categories_.append(cats)
def _transform(self, X, handle_unknown='error'):
X_list, n_samples, n_features = self._check_X(X)
X_int = np.zeros((n_samples, n_features), dtype=int)
X_mask = np.ones((n_samples, n_features), dtype=bool)
if n_features != len(self.categories_):
raise ValueError(
"The number of features in X is different to the number of "
"features of the fitted data. The fitted data had {} features "
"and the X has {} features."
.format(len(self.categories_,), n_features)
)
for i in range(n_features):
Xi = X_list[i]
diff, valid_mask = _check_unknown(Xi, self.categories_[i],
return_mask=True)
if not np.all(valid_mask):
if handle_unknown == 'error':
msg = ("Found unknown categories {0} in column {1}"
" during transform".format(diff, i))
raise ValueError(msg)
else:
# Set the problematic rows to an acceptable value and
# continue `The rows are marked `X_mask` and will be
# removed later.
X_mask[:, i] = valid_mask
# cast Xi into the largest string type necessary
# to handle different lengths of numpy strings
if (self.categories_[i].dtype.kind in ('U', 'S')
and self.categories_[i].itemsize > Xi.itemsize):
Xi = Xi.astype(self.categories_[i].dtype)
else:
Xi = Xi.copy()
Xi[~valid_mask] = self.categories_[i][0]
# We use check_unknown=False, since _check_unknown was
# already called above.
X_int[:, i] = _encode(Xi, uniques=self.categories_[i],
check_unknown=False)
return X_int, X_mask
def _more_tags(self):
return {'X_types': ['categorical']}
class OneHotEncoder(_BaseEncoder):
    """
    Encode categorical features as a one-hot numeric array.
    The input to this transformer should be an array-like of integers or
    strings, denoting the values taken on by categorical (discrete) features.
    The features are encoded using a one-hot (aka 'one-of-K' or 'dummy')
    encoding scheme. This creates a binary column for each category and
    returns a sparse matrix or dense array (depending on the ``sparse``
    parameter)
    By default, the encoder derives the categories based on the unique values
    in each feature. Alternatively, you can also specify the `categories`
    manually.
    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.
    Note: a one-hot encoding of y labels should use a LabelBinarizer
    instead.
    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
    .. versionchanged:: 0.20
    Parameters
    ----------
    categories : 'auto' or a list of array-like, default='auto'
        Categories (unique values) per feature:
        - 'auto' : Determine categories automatically from the training data.
        - list : ``categories[i]`` holds the categories expected in the ith
          column. The passed categories should not mix strings and numeric
          values within a single feature, and should be sorted in case of
          numeric values.
        The used categories can be found in the ``categories_`` attribute.
        .. versionadded:: 0.20
    drop : {'first', 'if_binary'} or a array-like of shape (n_features,), \
            default=None
        Specifies a methodology to use to drop one of the categories per
        feature. This is useful in situations where perfectly collinear
        features cause problems, such as when feeding the resulting data
        into a neural network or an unregularized regression.
        However, dropping one category breaks the symmetry of the original
        representation and can therefore induce a bias in downstream models,
        for instance for penalized linear classification or regression models.
        - None : retain all features (the default).
        - 'first' : drop the first category in each feature. If only one
          category is present, the feature will be dropped entirely.
        - 'if_binary' : drop the first category in each feature with two
          categories. Features with 1 or more than 2 categories are
          left intact.
        - array : ``drop[i]`` is the category in feature ``X[:, i]`` that
          should be dropped.
    sparse : bool, default=True
        Will return sparse matrix if set True else will return an array.
    dtype : number type, default=float
        Desired dtype of output.
    handle_unknown : {'error', 'ignore'}, default='error'
        Whether to raise an error or ignore if an unknown categorical feature
        is present during transform (default is to raise). When this parameter
        is set to 'ignore' and an unknown category is encountered during
        transform, the resulting one-hot encoded columns for this feature
        will be all zeros. In the inverse transform, an unknown category
        will be denoted as None.
    Attributes
    ----------
    categories_ : list of arrays
        The categories of each feature determined during fitting
        (in order of the features in X and corresponding with the output
        of ``transform``). This includes the category specified in ``drop``
        (if any).
    drop_idx_ : array of shape (n_features,)
        - ``drop_idx_[i]`` is the index in ``categories_[i]`` of the category
          to be dropped for each feature.
        - ``drop_idx_[i] = None`` if no category is to be dropped from the
          feature with index ``i``, e.g. when `drop='if_binary'` and the
          feature isn't binary.
        - ``drop_idx_ = None`` if all the transformed features will be
          retained.
    See Also
    --------
    sklearn.preprocessing.OrdinalEncoder : Performs an ordinal (integer)
      encoding of the categorical features.
    sklearn.feature_extraction.DictVectorizer : Performs a one-hot encoding of
      dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : Performs an approximate one-hot
      encoding of dictionary items or strings.
    sklearn.preprocessing.LabelBinarizer : Binarizes labels in a one-vs-all
      fashion.
    sklearn.preprocessing.MultiLabelBinarizer : Transforms between iterable of
      iterables and a multilabel format, e.g. a (samples x classes) binary
      matrix indicating the presence of a class label.
    Examples
    --------
    Given a dataset with two features, we let the encoder find the unique
    values per feature and transform the data to a binary one-hot encoding.
    >>> from sklearn.preprocessing import OneHotEncoder
    One can discard categories not seen during `fit`:
    >>> enc = OneHotEncoder(handle_unknown='ignore')
    >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
    >>> enc.fit(X)
    OneHotEncoder(handle_unknown='ignore')
    >>> enc.categories_
    [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
    >>> enc.transform([['Female', 1], ['Male', 4]]).toarray()
    array([[1., 0., 1., 0., 0.],
           [0., 1., 0., 0., 0.]])
    >>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]])
    array([['Male', 1],
           [None, 2]], dtype=object)
    >>> enc.get_feature_names(['gender', 'group'])
    array(['gender_Female', 'gender_Male', 'group_1', 'group_2', 'group_3'],
      dtype=object)
    One can always drop the first column for each feature:
    >>> drop_enc = OneHotEncoder(drop='first').fit(X)
    >>> drop_enc.categories_
    [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
    >>> drop_enc.transform([['Female', 1], ['Male', 2]]).toarray()
    array([[0., 0., 0.],
           [1., 1., 0.]])
    Or drop a column for feature only having 2 categories:
    >>> drop_binary_enc = OneHotEncoder(drop='if_binary').fit(X)
    >>> drop_binary_enc.transform([['Female', 1], ['Male', 2]]).toarray()
    array([[0., 1., 0., 0.],
           [1., 0., 1., 0.]])
    """
    @_deprecate_positional_args
    def __init__(self, *, categories='auto', drop=None, sparse=True,
                 dtype=np.float64, handle_unknown='error'):
        self.categories = categories
        self.sparse = sparse
        self.dtype = dtype
        self.handle_unknown = handle_unknown
        self.drop = drop
    def _validate_keywords(self):
        """Validate ``handle_unknown`` and its interaction with ``drop``."""
        if self.handle_unknown not in ('error', 'ignore'):
            msg = ("handle_unknown should be either 'error' or 'ignore', "
                   "got {0}.".format(self.handle_unknown))
            raise ValueError(msg)
        # If we have both dropped columns and ignored unknown
        # values, there will be ambiguous cells. This creates difficulties
        # in interpreting the model.
        if self.drop is not None and self.handle_unknown != 'error':
            raise ValueError(
                "`handle_unknown` must be 'error' when the drop parameter is "
                "specified, as both would create categories that are all "
                "zero.")
    def _compute_drop_idx(self):
        """Translate the ``drop`` parameter into per-feature category indices.

        Returns None (nothing dropped), or an object array with one entry per
        feature: the index into ``categories_[i]`` to drop, or None.
        """
        if self.drop is None:
            return None
        elif isinstance(self.drop, str):
            if self.drop == 'first':
                return np.zeros(len(self.categories_), dtype=object)
            elif self.drop == 'if_binary':
                return np.array([0 if len(cats) == 2 else None
                                 for cats in self.categories_], dtype=object)
            else:
                msg = (
                    "Wrong input for parameter `drop`. Expected "
                    "'first', 'if_binary', None or array of objects, got {}"
                    )
                raise ValueError(msg.format(type(self.drop)))
        else:
            try:
                self.drop = np.asarray(self.drop, dtype=object)
                droplen = len(self.drop)
            except (ValueError, TypeError):
                msg = (
                    "Wrong input for parameter `drop`. Expected "
                    "'first', 'if_binary', None or array of objects, got {}"
                    )
                raise ValueError(msg.format(type(self.drop)))
            if droplen != len(self.categories_):
                msg = ("`drop` should have length equal to the number "
                       "of features ({}), got {}")
                raise ValueError(msg.format(len(self.categories_),
                                            len(self.drop)))
            missing_drops = [(i, val) for i, val in enumerate(self.drop)
                             if val not in self.categories_[i]]
            if missing_drops:
                # BUGFIX: entries are (feature_index, category); the message
                # previously printed them swapped ("Category: <index>,
                # Feature: <category>").
                msg = ("The following categories were supposed to be "
                       "dropped, but were not found in the training "
                       "data.\n{}".format(
                           "\n".join(
                               ["Category: {}, Feature: {}".format(val, i)
                                for i, val in missing_drops])))
                raise ValueError(msg)
            return np.array([np.where(cat_list == val)[0][0]
                             for (val, cat_list) in
                             zip(self.drop, self.categories_)],
                            dtype=object)
    def fit(self, X, y=None):
        """
        Fit OneHotEncoder to X.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to determine the categories of each feature.
        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`~sklearn.pipeline.Pipeline`.
        Returns
        -------
        self
        """
        self._validate_keywords()
        self._fit(X, handle_unknown=self.handle_unknown)
        self.drop_idx_ = self._compute_drop_idx()
        return self
    def fit_transform(self, X, y=None):
        """
        Fit OneHotEncoder to X, then transform X.
        Equivalent to fit(X).transform(X) but more convenient.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to encode.
        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`~sklearn.pipeline.Pipeline`.
        Returns
        -------
        X_out : sparse matrix if sparse=True else a 2-d array
            Transformed input.
        """
        self._validate_keywords()
        return super().fit_transform(X, y)
    def transform(self, X):
        """
        Transform X using one-hot encoding.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to encode.
        Returns
        -------
        X_out : sparse matrix if sparse=True else a 2-d array
            Transformed input.
        """
        check_is_fitted(self)
        # validation of X happens in _check_X called by _transform
        X_int, X_mask = self._transform(X, handle_unknown=self.handle_unknown)
        n_samples, n_features = X_int.shape
        if self.drop_idx_ is not None:
            to_drop = self.drop_idx_.copy()
            # We remove all the dropped categories from mask, and decrement all
            # categories that occur after them to avoid an empty column.
            keep_cells = X_int != to_drop
            n_values = []
            for i, cats in enumerate(self.categories_):
                n_cats = len(cats)
                # drop='if_binary' but feature isn't binary
                if to_drop[i] is None:
                    # set to cardinality to not drop from X_int
                    to_drop[i] = n_cats
                    n_values.append(n_cats)
                else:  # dropped
                    n_values.append(n_cats - 1)
            to_drop = to_drop.reshape(1, -1)
            X_int[X_int > to_drop] -= 1
            X_mask &= keep_cells
        else:
            n_values = [len(cats) for cats in self.categories_]
        # Build the CSR arrays directly: each kept cell contributes one 1.0 at
        # (row, feature_offset + category_index).
        mask = X_mask.ravel()
        feature_indices = np.cumsum([0] + n_values)
        indices = (X_int + feature_indices[:-1]).ravel()[mask]
        indptr = np.empty(n_samples + 1, dtype=int)
        indptr[0] = 0
        np.sum(X_mask, axis=1, out=indptr[1:])
        np.cumsum(indptr[1:], out=indptr[1:])
        data = np.ones(indptr[-1])
        out = sparse.csr_matrix((data, indices, indptr),
                                shape=(n_samples, feature_indices[-1]),
                                dtype=self.dtype)
        if not self.sparse:
            return out.toarray()
        else:
            return out
    def inverse_transform(self, X):
        """
        Convert the data back to the original representation.
        In case unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category.
        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.
        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')
        n_samples, _ = X.shape
        n_features = len(self.categories_)
        if self.drop_idx_ is None:
            n_transformed_features = sum(len(cats)
                                         for cats in self.categories_)
        else:
            n_transformed_features = sum(
                len(cats) - 1 if to_drop is not None else len(cats)
                for cats, to_drop in zip(self.categories_, self.drop_idx_)
            )
        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_transformed_features:
            raise ValueError(msg.format(n_transformed_features, X.shape[1]))
        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)
        j = 0
        found_unknown = {}
        for i in range(n_features):
            if self.drop_idx_ is None or self.drop_idx_[i] is None:
                cats = self.categories_[i]
            else:
                cats = np.delete(self.categories_[i], self.drop_idx_[i])
            n_categories = len(cats)
            # Only happens if there was a column with a unique
            # category. In this case we just fill the column with this
            # unique category value.
            if n_categories == 0:
                X_tr[:, i] = self.categories_[i][self.drop_idx_[i]]
                j += n_categories
                continue
            sub = X[:, j:j + n_categories]
            # for sparse X argmax returns 2D matrix, ensure 1D array
            labels = np.asarray(sub.argmax(axis=1)).flatten()
            X_tr[:, i] = cats[labels]
            if self.handle_unknown == 'ignore':
                unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
                # ignored unknown categories: we have a row of all zero
                if unknown.any():
                    found_unknown[i] = unknown
            # drop will either be None or handle_unknown will be error. If
            # self.drop_idx_ is not None, then we can safely assume that all of
            # the nulls in each column are the dropped value
            elif self.drop_idx_ is not None:
                dropped = np.asarray(sub.sum(axis=1) == 0).flatten()
                if dropped.any():
                    X_tr[dropped, i] = self.categories_[i][self.drop_idx_[i]]
            j += n_categories
        # if ignored are found: potentially need to upcast result to
        # insert None values
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)
            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None
        return X_tr
    def get_feature_names(self, input_features=None):
        """
        Return feature names for output features.
        Parameters
        ----------
        input_features : list of str of shape (n_features,)
            String names for input features if available. By default,
            "x0", "x1", ... "xn_features" is used.
        Returns
        -------
        output_feature_names : ndarray of shape (n_output_features,)
            Array of feature names.
        """
        check_is_fitted(self)
        cats = self.categories_
        if input_features is None:
            input_features = ['x%d' % i for i in range(len(cats))]
        elif len(input_features) != len(self.categories_):
            raise ValueError(
                "input_features should have length equal to number of "
                "features ({}), got {}".format(len(self.categories_),
                                               len(input_features)))
        feature_names = []
        for i in range(len(cats)):
            names = [
                input_features[i] + '_' + str(t) for t in cats[i]]
            if self.drop_idx_ is not None and self.drop_idx_[i] is not None:
                names.pop(self.drop_idx_[i])
            feature_names.extend(names)
        return np.array(feature_names, dtype=object)
class OrdinalEncoder(_BaseEncoder):
    """
    Encode categorical features as an integer array.
    The input to this transformer should be an array-like of integers or
    strings, denoting the values taken on by categorical (discrete) features.
    Each feature is mapped to a single column of ordinal integers
    (0 to n_categories - 1).
    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
    .. versionadded:: 0.20
    Parameters
    ----------
    categories : 'auto' or a list of array-like, default='auto'
        Categories (unique values) per feature:
        - 'auto' : Determine categories automatically from the training data.
        - list : ``categories[i]`` holds the categories expected in the ith
          column. The passed categories should not mix strings and numeric
          values, and should be sorted in case of numeric values.
        The used categories can be found in the ``categories_`` attribute.
    dtype : number type, default np.float64
        Desired dtype of output.
    Attributes
    ----------
    categories_ : list of arrays
        The categories of each feature determined during fitting
        (in order of the features in X and corresponding with the output
        of ``transform``).
    See Also
    --------
    sklearn.preprocessing.OneHotEncoder : Performs a one-hot encoding of
      categorical features.
    sklearn.preprocessing.LabelEncoder : Encodes target labels with values
      between 0 and n_classes-1.
    Examples
    --------
    Given a dataset with two features, we let the encoder find the unique
    values per feature and transform the data to an ordinal encoding.
    >>> from sklearn.preprocessing import OrdinalEncoder
    >>> enc = OrdinalEncoder()
    >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
    >>> enc.fit(X)
    OrdinalEncoder()
    >>> enc.categories_
    [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
    >>> enc.transform([['Female', 3], ['Male', 1]])
    array([[0., 2.],
           [1., 0.]])
    >>> enc.inverse_transform([[1, 0], [0, 1]])
    array([['Male', 1],
           ['Female', 2]], dtype=object)
    """
    @_deprecate_positional_args
    def __init__(self, *, categories='auto', dtype=np.float64):
        self.categories = categories
        self.dtype = dtype
    def fit(self, X, y=None):
        """
        Fit the OrdinalEncoder to X.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to determine the categories of each feature.
        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`~sklearn.pipeline.Pipeline`.
        Returns
        -------
        self
        """
        # Category discovery is shared with the other encoders via the base
        # class; nothing OrdinalEncoder-specific happens here.
        self._fit(X)
        return self
    def transform(self, X):
        """
        Transform X to ordinal codes.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to encode.
        Returns
        -------
        X_out : sparse matrix or a 2-d array
            Transformed input.
        """
        encoded, _ = self._transform(X)
        return encoded.astype(self.dtype, copy=False)
    def inverse_transform(self, X):
        """
        Convert the data back to the original representation.
        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.
        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')
        n_samples, _ = X.shape
        n_features = len(self.categories_)
        # validate shape of passed X
        if X.shape[1] != n_features:
            raise ValueError(
                "Shape of the passed X data is not correct. Expected {0} "
                "columns, got {1}.".format(n_features, X.shape[1]))
        # create resulting array of a dtype able to hold every feature's
        # categories
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)
        for feature_idx, cats in enumerate(self.categories_):
            codes = X[:, feature_idx].astype('int64', copy=False)
            X_tr[:, feature_idx] = cats[codes]
        return X_tr
| {
"content_hash": "c11f4de96ce2711d47e13dae2b4b4f63",
"timestamp": "",
"source": "github",
"line_count": 732,
"max_line_length": 79,
"avg_line_length": 37.8551912568306,
"alnum_prop": 0.5576326236015878,
"repo_name": "bnaul/scikit-learn",
"id": "d8df3b33117837584878360530b20885f1883e69",
"size": "27854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/preprocessing/_encoders.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7229182"
},
{
"name": "Shell",
"bytes": "19938"
}
],
"symlink_target": ""
} |
"""Config flow to configure the PVOutput integration."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any
from pvo import PVOutput, PVOutputAuthenticationError, PVOutputError
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigFlow
from homeassistant.const import CONF_API_KEY
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import CONF_SYSTEM_ID, DOMAIN, LOGGER
async def validate_input(hass: HomeAssistant, *, api_key: str, system_id: int) -> None:
    """Validate the given system id & API key against the PVOutput API.

    Lets any PVOutput exception from the client propagate to the caller.
    """
    client = PVOutput(
        session=async_get_clientsession(hass),
        api_key=api_key,
        system_id=system_id,
    )
    await client.system()
class PVOutputFlowHandler(ConfigFlow, domain=DOMAIN):
    """Config flow for PVOutput."""
    VERSION = 1
    # Title to use for the created entry instead of the system id.
    # NOTE(review): nothing in this chunk assigns it -- presumably set by an
    # import step elsewhere; confirm.
    imported_name: str | None = None
    # Config entry being re-authenticated; only set during a reauth flow.
    reauth_entry: ConfigEntry | None = None
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a flow initialized by the user."""
        errors = {}
        if user_input is not None:
            try:
                await validate_input(
                    self.hass,
                    api_key=user_input[CONF_API_KEY],
                    system_id=user_input[CONF_SYSTEM_ID],
                )
            except PVOutputAuthenticationError:
                errors["base"] = "invalid_auth"
            except PVOutputError:
                LOGGER.exception("Cannot connect to PVOutput")
                errors["base"] = "cannot_connect"
            else:
                # One config entry per PVOutput system id.
                await self.async_set_unique_id(str(user_input[CONF_SYSTEM_ID]))
                self._abort_if_unique_id_configured()
                return self.async_create_entry(
                    title=self.imported_name or str(user_input[CONF_SYSTEM_ID]),
                    data={
                        CONF_SYSTEM_ID: user_input[CONF_SYSTEM_ID],
                        CONF_API_KEY: user_input[CONF_API_KEY],
                    },
                )
        else:
            user_input = {}
        # First visit, or validation failed: (re)render the form, pre-filling
        # any previously entered values.
        return self.async_show_form(
            step_id="user",
            description_placeholders={
                "account_url": "https://pvoutput.org/account.jsp"
            },
            data_schema=vol.Schema(
                {
                    vol.Required(
                        CONF_API_KEY, default=user_input.get(CONF_API_KEY, "")
                    ): str,
                    vol.Required(
                        CONF_SYSTEM_ID, default=user_input.get(CONF_SYSTEM_ID, "")
                    ): int,
                }
            ),
            errors=errors,
        )
    async def async_step_reauth(self, data: Mapping[str, Any]) -> FlowResult:
        """Handle initiation of re-authentication with PVOutput."""
        self.reauth_entry = self.hass.config_entries.async_get_entry(
            self.context["entry_id"]
        )
        return await self.async_step_reauth_confirm()
    async def async_step_reauth_confirm(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle re-authentication with PVOutput."""
        errors = {}
        if user_input is not None and self.reauth_entry:
            try:
                # Re-validate using the stored system id with the new key.
                await validate_input(
                    self.hass,
                    api_key=user_input[CONF_API_KEY],
                    system_id=self.reauth_entry.data[CONF_SYSTEM_ID],
                )
            except PVOutputAuthenticationError:
                errors["base"] = "invalid_auth"
            except PVOutputError:
                errors["base"] = "cannot_connect"
            else:
                # Persist the new API key, keeping all other entry data,
                # then reload the integration with the updated credentials.
                self.hass.config_entries.async_update_entry(
                    self.reauth_entry,
                    data={
                        **self.reauth_entry.data,
                        CONF_API_KEY: user_input[CONF_API_KEY],
                    },
                )
                self.hass.async_create_task(
                    self.hass.config_entries.async_reload(self.reauth_entry.entry_id)
                )
                return self.async_abort(reason="reauth_successful")
        return self.async_show_form(
            step_id="reauth_confirm",
            description_placeholders={
                "account_url": "https://pvoutput.org/account.jsp"
            },
            data_schema=vol.Schema({vol.Required(CONF_API_KEY): str}),
            errors=errors,
        )
| {
"content_hash": "2a985a860c81205e60d3e24a4b035a69",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 87,
"avg_line_length": 35.954198473282446,
"alnum_prop": 0.5441613588110403,
"repo_name": "toddeye/home-assistant",
"id": "25cc68acc240f712d331004cbd53f606de4cc34d",
"size": "4710",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/pvoutput/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
"""Rename admin role.
Revision ID: 3adc42b4f6b9
Revises: 10adeac7b693
Create Date: 2013-10-10 22:13:16.470076
"""
# revision identifiers, used by Alembic.
revision = '3adc42b4f6b9'
down_revision = '10adeac7b693'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
# Lightweight table stub so this data migration can issue UPDATEs without
# importing the application's full model definitions.
roles_table = table('roles',
    column('id', sa.Integer),
    column('name', sa.String),
    column('updated_at', sa.DateTime),
    )
def upgrade():
    """Rename the 'System Administrator' role to 'gGRC Admin'."""
    statement = (
        roles_table.update()
        .where(roles_table.c.name == 'System Administrator')
        .values(name='gGRC Admin')
    )
    op.execute(statement)
def downgrade():
    """Revert the 'gGRC Admin' role back to 'System Administrator'."""
    statement = (
        roles_table.update()
        .where(roles_table.c.name == 'gGRC Admin')
        .values(name='System Administrator')
    )
    op.execute(statement)
| {
"content_hash": "ae2f50910ceb81cd0fa615ebce05ad9e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 59,
"avg_line_length": 23.575757575757574,
"alnum_prop": 0.6928020565552699,
"repo_name": "hasanalom/ggrc-core",
"id": "109f8d45c9c02369c2fc3d3a4f6756e27fe676f7",
"size": "1019",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "src/ggrc_basic_permissions/migrations/versions/20131010221316_3adc42b4f6b9_rename_admin_role.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "235548"
},
{
"name": "Cucumber",
"bytes": "140478"
},
{
"name": "HTML",
"bytes": "943449"
},
{
"name": "JavaScript",
"bytes": "1205686"
},
{
"name": "Makefile",
"bytes": "5936"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "1874549"
},
{
"name": "Ruby",
"bytes": "1496"
},
{
"name": "Shell",
"bytes": "11719"
}
],
"symlink_target": ""
} |
from django.core.exceptions import AppRegistryNotReady
# IMPORTANT: the following code needs to be here so we can import
# our models without having to call django.setup() in every single
# file that tries to import them
try:
    # If the app registry is already populated this is a no-op.
    from django.apps import apps
    apps.check_apps_ready()
except AppRegistryNotReady:
    # Registry not ready yet (e.g. module imported outside a managed
    # Django process): bootstrap Django explicitly.
    import django
    django.setup()
| {
"content_hash": "5ac5c44ddf72c241a51b9bf3e23b59c8",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 66,
"avg_line_length": 32.18181818181818,
"alnum_prop": 0.7655367231638418,
"repo_name": "OnRampOrg/onramp",
"id": "9d68a4a3ea54cda6e8b566896f5188a81dba9096",
"size": "354",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/ui/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1438724"
},
{
"name": "C++",
"bytes": "5976"
},
{
"name": "CSS",
"bytes": "4521"
},
{
"name": "Gnuplot",
"bytes": "701"
},
{
"name": "HTML",
"bytes": "755284"
},
{
"name": "JavaScript",
"bytes": "258733"
},
{
"name": "Makefile",
"bytes": "88917"
},
{
"name": "Perl",
"bytes": "5501"
},
{
"name": "Python",
"bytes": "647003"
},
{
"name": "Roff",
"bytes": "253441"
},
{
"name": "Shell",
"bytes": "8072"
},
{
"name": "SourcePawn",
"bytes": "120276"
},
{
"name": "TeX",
"bytes": "83986"
}
],
"symlink_target": ""
} |
import unittest
import mock
from flashcards import storage
from flashcards.utils import storage as storageUtils
from flashcards.storage import StudySetStorage
from flashcards.sets import StudySet
from flashcards.cards import StudyCard
class TestStorageModule(unittest.TestCase):
    """Tests for module-level helpers in flashcards.storage / utils."""
    @mock.patch('flashcards.storage.os.path.expanduser')
    def test_get_storage_path(self, mock_expand_usr):
        """storage_path() resolves to '.flashcards' under the user's home."""
        mock_expand_usr.return_value = '/home/admin'
        self.assertEqual('/home/admin/.flashcards', storage.storage_path())
    def test_generate_filename_from_str_spaces(self):
        """Spaces become underscores in generated filenames."""
        result = storageUtils.generate_filename_from_str('file name')
        expected = 'file_name'
        self.assertEqual(expected, result)
    def test_generate_filename_from_str_dashes(self):
        """Dashes become underscores in generated filenames."""
        result = storageUtils.generate_filename_from_str('file-name')
        expected = 'file_name'
        self.assertEqual(expected, result)
    def test_generate_filename_from_str_alphanum(self):
        """Non-alphanumeric characters are stripped (dash still maps to _)."""
        result = storageUtils.generate_filename_from_str('!f@i#l$e%-^n&a*m(e)0')
        expected = 'file_name0'
        self.assertEqual(expected, result)
class TestStoredStudySet(unittest.TestCase):
    """Tests for StudySetStorage behavior."""
    @mock.patch('flashcards.storage.os.path.isfile')
    def test_load_study_set_error_filenoexist(self, mock_filecheck):
        """load() raises IOError when the backing file is missing."""
        stored_set = StudySetStorage('/home/usr/.flashcards/set01.json')
        # Should raise an error because the file does not exist
        mock_filecheck.return_value = False
        self.assertRaises(IOError, stored_set.load)
    @mock.patch('flashcards.storage.os.rename')
    def test_rename_filename(self, mock_rename):
        """_rename_filename updates the stored path, keeping dir + extension."""
        stored_set = StudySetStorage('/home/usr/.flashcards/set01.json')
        study_set = StudySet('maths145', 'Maths questions')
        study_set.add(StudyCard('2+2=?', '4'))
        newname = storageUtils.generate_filename_from_str(study_set.title)
        stored_set._rename_filename(newname)
        expected = '/home/usr/.flashcards/maths145.json'
        self.assertEqual(expected, stored_set._filepath)
| {
"content_hash": "ba8df0e2a046b14ad19a25c8fd77c5de",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 80,
"avg_line_length": 35.44827586206897,
"alnum_prop": 0.7023346303501945,
"repo_name": "zergov/flashcards",
"id": "00c5f3db5cf3479c2e98761eeff6f53f70d94913",
"size": "2056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35491"
},
{
"name": "Shell",
"bytes": "844"
}
],
"symlink_target": ""
} |
""" Execution engine """
from builtins import int
import gzip
import os
import time
import botocore
import csv
import json
import pickle
import logging
from base64 import b64encode
from botocore.exceptions import ClientError
from decimal import Decimal
from dynamo3 import (TYPES, DynamoDBConnection, DynamoKey, LocalIndex,
GlobalIndex, DynamoDBError, Throughput, CheckFailed,
IndexUpdate, Limit, RateLimit, Capacity, Binary)
from dynamo3.constants import RESERVED_WORDS
from pprint import pformat
from pyparsing import ParseException
from .expressions import (ConstraintExpression, UpdateExpression, Visitor,
SelectionExpression)
from .grammar import parser, line_parser
from .models import TableMeta
from .util import resolve, unwrap, plural
LOG = logging.getLogger(__name__)
def default(value):
    """ Default encoder for JSON """
    if isinstance(value, Decimal):
        as_float = float(value)
        # Render whole numbers without a trailing '.0'.
        if int(as_float) == as_float:
            return int(as_float)
        return as_float
    if isinstance(value, set):
        return list(value)
    if isinstance(value, Binary):
        return b64encode(value.value)
    raise TypeError("Cannot encode %s value %r" % (type(value), value))
class ExplainSignal(Exception):
    """Raised to halt execution of a query that is being EXPLAINed."""
def add_query_kwargs(kwargs, visitor, constraints, index):
    """ Construct KeyConditionExpression and FilterExpression """
    key_expr, filter_expr = constraints.remove_index(index)
    kwargs['key_condition_expr'] = key_expr.build(visitor)
    if filter_expr:
        kwargs['filter'] = filter_expr.build(visitor)
    # 'TABLE' denotes the table's primary index; only secondary indexes are
    # passed through explicitly.
    if index.name != 'TABLE':
        kwargs['index'] = index.name
def iter_insert_items(tree):
    """ Iterate over the items to insert from an INSERT statement """
    if tree.list_values:
        # INSERT ... VALUES form: pair each value tuple with the declared
        # attribute names.
        attrs = tree.attrs
        for row in tree.list_values:
            if len(attrs) != len(row):
                raise SyntaxError("Values '%s' do not match attributes "
                                  "'%s'" % (row, attrs))
            yield {key: resolve(raw) for key, raw in zip(attrs, row)}
    elif tree.map_values:
        # INSERT ... form with explicit key/value maps.
        for pairs in tree.map_values:
            yield {key: resolve(raw) for (key, raw) in pairs}
    else:
        raise SyntaxError("No insert data found")
class Engine(object):
"""
DQL execution engine
Parameters
----------
connection : :class:`~dynamo3.DynamoDBConnection`, optional
If not present, you will need to call :meth:`.Engine.connect`
Attributes
----------
caution_callback : callable, optional
Called to prompt user when a potentially dangerous action is about to
occur.
"""
    def __init__(self, connection=None):
        """ Create an engine, optionally wrapping an existing connection """
        # Assign via the `connection` property so subscription bookkeeping
        # runs; _connection must exist first for the setter to inspect.
        self._connection = None
        self.connection = connection
        # Cache of tablename -> TableMeta, reset whenever we reconnect
        self.cached_descriptions = {}
        # Lazily created by the `cloudwatch_connection` property
        self._cloudwatch_connection = None
        self.allow_select_scan = False
        self.reserved_words = RESERVED_WORDS
        self._session = None
        # (command, capacity) pairs gathered while running ANALYZE
        self.consumed_capacities = []
        # (command, kwargs) pairs gathered while running EXPLAIN
        self._call_list = []
        self._explaining = False
        self._analyzing = False
        # Rate limit installed by a per-query THROTTLE clause
        self._query_rate_limit = None
        # Engine-wide rate limit (used when no per-query limit is active)
        self.rate_limit = None
        # Compact JSON encoder used when saving results to .json files
        self._encoder = json.JSONEncoder(separators=(',', ':'),
                                         default=default)
        # Optional hook consulted before dangerous (unfiltered) operations
        self.caution_callback = None
    def connect(self, *args, **kwargs):
        """ Proxy to DynamoDBConnection.connect. """
        self.connection = DynamoDBConnection.connect(*args, **kwargs)
        # Keep the caller's botocore session (if given) for CloudWatch use
        self._session = kwargs.get('session')
        if self._session is None:
            self._session = botocore.session.get_session()
@property
def region(self):
""" Get the connected dynamo region or host """
return self._connection.region
@property
def connection(self):
""" Get the dynamo connection """
return self._connection
@connection.setter
def connection(self, connection):
""" Change the dynamo connection """
if connection is not None:
connection.subscribe('capacity', self._on_capacity_data)
connection.default_return_capacity = True
if self._connection is not None:
connection.unsubscribe('capacity', self._on_capacity_data)
self._connection = connection
self._cloudwatch_connection = None
self.cached_descriptions = {}
    @property
    def cloudwatch_connection(self):
        """ Lazy create a connection to cloudwatch """
        if self._cloudwatch_connection is None:
            # Reuse the botocore session established in connect(), pointed
            # at the same region as the dynamo connection
            conn = self._session.create_client('cloudwatch',
                                               self.connection.region)
            self._cloudwatch_connection = conn
        return self._cloudwatch_connection
def _format_explain(self):
""" Format the results of an EXPLAIN """
lines = []
for (command, kwargs) in self._call_list:
lines.append(command + ' ' + pformat(kwargs))
return '\n'.join(lines)
    def _pretty_format(self, statement, result):
        """ Format the return value of a query for humans """
        if result is None:
            return 'Success'
        ret = result
        if statement.action in ('SELECT', 'SCAN'):
            if statement.save_file:
                filename = statement.save_file[0]
                # Strip quotes from a quoted filename literal
                if filename[0] in ['"', "'"]:
                    filename = unwrap(filename)
                ret = "Saved %d record%s to %s" % (result, plural(result),
                                                   filename)
            elif isinstance(result, int):
                # NOTE(review): this assumes `result` is a dynamo3 Count
                # (an int subclass carrying scanned_count); a plain int
                # would raise AttributeError here -- confirm.
                if result == result.scanned_count:
                    ret = "%d" % result
                else:
                    ret = "%d (scanned count: %d)" % (result,
                                                      result.scanned_count)
        elif statement.action == 'UPDATE':
            if isinstance(result, int):
                ret = "Updated %d item%s" % (result, plural(result))
        elif statement.action == 'DELETE':
            ret = "Deleted %d item%s" % (result, plural(result))
        elif statement.action == 'CREATE':
            if result:
                ret = "Created table %r" % statement.table
            else:
                ret = "Table %r already exists" % statement.table
        elif statement.action == 'INSERT':
            ret = "Inserted %d item%s" % (result, plural(result))
        elif statement.action == 'DROP':
            if result:
                ret = "Dropped table %r" % statement.table
            else:
                ret = "Table %r does not exist" % statement.table
        elif statement.action == 'ANALYZE':
            # Format the wrapped statement's own result
            ret = self._pretty_format(statement[1], result)
        elif statement.action == 'LOAD':
            ret = "Loaded %d item%s" % (result, plural(result))
        return ret
def describe_all(self, refresh=True):
""" Describe all tables in the connected region """
tables = self.connection.list_tables()
descs = []
for tablename in tables:
descs.append(self.describe(tablename, refresh))
return descs
    def _get_metric(self, metric, tablename, index_name=None):
        """ Fetch a read/write capacity metric

        Returns the most recent datapoint's per-second rate over a 3-minute
        CloudWatch window, or 0 if there are no datapoints.

        """
        # NOTE(review): `time` is used here but not imported at the top of
        # this module -- confirm the import exists.
        end = time.time()
        begin = end - 3 * 60 # 3 minute window
        dimensions = [{'Name': 'TableName', 'Value': tablename}]
        if index_name is not None:
            dimensions.append({'Name': 'GlobalSecondaryIndexName',
                               'Value': index_name})
        period = 60
        data = self.cloudwatch_connection.get_metric_statistics(
            Period=period,
            StartTime=begin,
            EndTime=end,
            MetricName=metric,
            Namespace='AWS/DynamoDB',
            Statistics=['Sum'],
            Dimensions=dimensions,
        )
        points = data['Datapoints']
        if not points:
            return 0
        else:
            # Datapoints arrive unordered; take the newest and convert its
            # per-period sum to a per-second rate
            points.sort(key=lambda r: r['Timestamp'])
            return float(points[-1]['Sum']) / period
def get_capacity(self, tablename, index_name=None):
""" Get the consumed read/write capacity """
# If we're connected to a DynamoDB Local instance, don't connect to the
# actual cloudwatch endpoint
if self.connection.region == 'local':
return 0, 0
# Gracefully fail if we get exceptions from CloudWatch
try:
return (
self._get_metric('ConsumedReadCapacityUnits', tablename,
index_name),
self._get_metric('ConsumedWriteCapacityUnits', tablename,
index_name),
)
except ClientError:
return 0, 0
    def describe(self, tablename, refresh=False, metrics=False, require=False):
        """ Get the :class:`.TableMeta` for a table

        Parameters
        ----------
        tablename : str
        refresh : bool
            If True, bypass the cached description
        metrics : bool
            If True, also populate consumed-capacity data
        require : bool
            If True, raise RuntimeError instead of returning None when the
            table does not exist

        """
        table = self.cached_descriptions.get(tablename)
        # Re-fetch on an explicit refresh, a cache miss, or when metrics
        # are wanted but were never filled in
        if refresh or table is None or \
                (metrics and not table.consumed_capacity):
            desc = self.connection.describe_table(tablename)
            if desc is None:
                if require:
                    raise RuntimeError("Table %r not found" % tablename)
                else:
                    return None
            table = TableMeta.from_description(desc)
            self.cached_descriptions[tablename] = table
            if metrics:
                read, write = self.get_capacity(tablename)
                table.consumed_capacity['__table__'] = {
                    'read': read,
                    'write': write,
                }
                # Each global index reports its capacity separately
                for index_name in table.global_indexes:
                    read, write = self.get_capacity(tablename, index_name)
                    table.consumed_capacity[index_name] = {
                        'read': read,
                        'write': write,
                    }
        return table
    def execute(self, commands, pretty_format=False):
        """
        Parse and run a DQL string

        Parameters
        ----------
        commands : str
            The DQL command string
        pretty_format : bool
            Pretty-format the return value. (e.g. 4 -> 'Updated 4 items')

        """
        tree = parser.parseString(commands)
        # Reset per-execution state
        self.consumed_capacities = []
        self._analyzing = False
        self._query_rate_limit = None
        for statement in tree:
            try:
                result = self._run(statement)
            except ExplainSignal:
                # An EXPLAIN aborts execution; report the recorded calls
                return self._format_explain()
        # NOTE(review): only the last statement's result is returned, and
        # `result` would be unbound if the parse tree were empty -- confirm
        # the grammar guarantees at least one statement.
        if pretty_format:
            return self._pretty_format(tree[-1], result)
        return result
def _run(self, tree):
""" Run a query from a parse tree """
if tree.throttle:
limiter = self._parse_throttle(tree.table, tree.throttle)
self._query_rate_limit = limiter
del tree['throttle']
return self._run(tree)
if tree.action == 'SELECT':
return self._select(tree, self.allow_select_scan)
elif tree.action == 'SCAN':
return self._scan(tree)
elif tree.action == 'DELETE':
return self._delete(tree)
elif tree.action == 'UPDATE':
return self._update(tree)
elif tree.action == 'CREATE':
return self._create(tree)
elif tree.action == 'INSERT':
return self._insert(tree)
elif tree.action == 'DROP':
return self._drop(tree)
elif tree.action == 'ALTER':
return self._alter(tree)
elif tree.action == 'DUMP':
return self._dump(tree)
elif tree.action == 'LOAD':
return self._load(tree)
elif tree.action == 'EXPLAIN':
return self._explain(tree)
elif tree.action == 'ANALYZE':
self._analyzing = True
self.connection.default_return_capacity = True
return self._run(tree[1])
else:
raise SyntaxError("Unrecognized action '%s'" % tree.action)
    def _parse_throttle(self, tablename, throttle):
        """ Parse a 'throttle' statement and return a RateLimit

        Each throttle value may be '*' (unlimited), a percentage of the
        table's provisioned throughput, or an absolute number.

        """
        amount = []
        desc = self.describe(tablename)
        throughputs = [desc.read_throughput, desc.write_throughput]
        # throttle[0] is the THROTTLE keyword; the rest pair up with
        # (read, write) throughput
        for value, throughput in zip(throttle[1:], throughputs):
            if value == '*':
                amount.append(0)
            elif value[-1] == '%':
                # Percentage of the provisioned throughput
                amount.append(throughput * float(value[:-1]) / 100.)
            else:
                amount.append(float(value))
        cap = Capacity(*amount)  # pylint: disable=E1120
        return RateLimit(total=cap, callback=self._on_throttle)
    def _on_capacity_data(self, conn, command, kwargs, response, capacity):
        """ Log the received consumed capacity data

        Subscribed to the connection's 'capacity' event.  Feeds ANALYZE
        bookkeeping and whichever rate limit is active.

        """
        if self._analyzing:
            self.consumed_capacities.append((command, capacity))
        # A per-query THROTTLE takes precedence over the engine-wide limit
        if self._query_rate_limit is not None:
            self._query_rate_limit.on_capacity(conn, command, kwargs, response,
                                               capacity)
        elif self.rate_limit is not None:
            self.rate_limit.callback = self._on_throttle
            self.rate_limit.on_capacity(conn, command, kwargs, response,
                                        capacity)
    def _on_throttle(self, conn, command, kwargs, response, capacity, seconds):
        """ Print out a message when the query is throttled

        Rate-limit callback; ``seconds`` is how long the limiter is about
        to sleep.

        """
        LOG.info("Throughput limit exceeded during %s. "
                 "Sleeping for %d second%s",
                 command, seconds, plural(seconds))
    def _explain(self, tree):
        """ Set up the engine to do a dry run of a query """
        self._explaining = True
        self._call_list = []
        old_call = self.connection.call
        def fake_call(command, **kwargs):
            """ Replacement for connection.call that logs args """
            # describe_table must really run so query planning can inspect
            # the table's keys and indexes
            if command == 'describe_table':
                return old_call(command, **kwargs)
            self._call_list.append((command, kwargs))
            raise ExplainSignal
        self.connection.call = fake_call
        try:
            ret = self._run(tree[1])
            try:
                # Some commands return lazy iterators; drain them so the
                # fake call actually fires
                list(ret)
            except TypeError:
                pass
        finally:
            # Always restore the real call, even when ExplainSignal escapes
            self.connection.call = old_call
            self._explaining = False
    def _build_query(self, table, tree, visitor):
        """ Build a scan/query from a statement

        Returns
        -------
        [action, kwargs, index] : list
            ``action`` is 'scan' or 'query', ``kwargs`` are arguments for
            that connection method, and ``index`` is the chosen index (or
            None)

        """
        kwargs = {}
        index = None
        if tree.using:
            # Explicit USING <index> clause
            index_name = kwargs['index'] = tree.using[1]
            index = table.get_index(index_name)
        if tree.where:
            constraints = ConstraintExpression.from_where(tree.where)
            possible_hash = constraints.possible_hash_fields()
            possible_range = constraints.possible_range_fields()
            if index is None:
                # See if we can find an index to query on
                indexes = table.get_matching_indexes(possible_hash,
                                                     possible_range)
                if not indexes:
                    # No usable index; fall back to a filtered scan
                    action = 'scan'
                    kwargs['filter'] = constraints.build(visitor)
                    kwargs['expr_values'] = visitor.expression_values
                    kwargs['alias'] = visitor.attribute_names
                elif len(indexes) == 1:
                    index = indexes[0]
                    action = 'query'
                    add_query_kwargs(kwargs, visitor, constraints, index)
                else:
                    # Ambiguous; make the user choose with USING
                    names = ', '.join([index.name for index in indexes])
                    raise SyntaxError("No index specified with USING <index>, "
                                      "but multiple possibilities for query: "
                                      "%s" % names)
            else:
                if index.hash_key in possible_hash:
                    action = 'query'
                    add_query_kwargs(kwargs, visitor, constraints, index)
                else:
                    # The chosen index cannot serve this constraint as a
                    # query; scan it instead (if the index allows scans)
                    action = 'scan'
                    if not index.scannable:
                        raise SyntaxError("Cannot scan local index %r" %
                                          index_name)
                    kwargs['filter'] = constraints.build(visitor)
                    kwargs['expr_values'] = visitor.expression_values
                    kwargs['alias'] = visitor.attribute_names
        else:
            # No WHERE clause at all: full scan
            action = 'scan'
        return [action, kwargs, index]
    def _iter_where_in(self, tree):
        """ Iterate over the KEYS IN and generate primary keys """
        desc = self.describe(tree.table, require=True)
        for keypair in tree.keys_in:
            # Each keypair is (hash,) or (hash, range) literal values
            yield desc.primary_key(*map(resolve, keypair))
    def _select(self, tree, allow_select_scan):
        """ Run a SELECT statement

        Parameters
        ----------
        tree : ParseResults-like
            The parsed SELECT statement
        allow_select_scan : bool
            If False, raise SyntaxError instead of falling back to a full
            table scan

        """
        tablename = tree.table
        desc = self.describe(tablename, require=True)
        kwargs = {}
        if tree.consistent:
            kwargs['consistent'] = True
        visitor = Visitor(self.reserved_words)
        selection = SelectionExpression.from_selection(tree.attrs)
        if selection.is_count:
            kwargs['select'] = 'COUNT'
        # KEYS IN is served by BatchGetItem, which supports none of the
        # other query modifiers
        if tree.keys_in:
            if tree.limit:
                raise SyntaxError("Cannot use LIMIT with KEYS IN")
            elif tree.using:
                raise SyntaxError("Cannot use USING with KEYS IN")
            elif tree.order:
                raise SyntaxError("Cannot use DESC/ASC with KEYS IN")
            elif tree.where:
                raise SyntaxError("Cannot use WHERE with KEYS IN")
            keys = list(self._iter_where_in(tree))
            kwargs['attributes'] = selection.build(visitor)
            kwargs['alias'] = visitor.attribute_names
            return self.connection.batch_get(tablename, keys=keys, **kwargs)
        # LIMIT bounds returned items; SCAN LIMIT bounds examined items
        if tree.limit:
            if tree.scan_limit:
                kwargs['limit'] = Limit(scan_limit=resolve(tree.scan_limit[2]),
                                        item_limit=resolve(tree.limit[1]),
                                        strict=True)
            else:
                kwargs['limit'] = Limit(item_limit=resolve(tree.limit[1]),
                                        strict=True)
        elif tree.scan_limit:
            kwargs['limit'] = Limit(scan_limit=resolve(tree.scan_limit[2]))
        (action, query_kwargs, index) = self._build_query(desc, tree, visitor)
        if action == 'scan' and not allow_select_scan:
            raise SyntaxError(
                "No index found for query. Please use a SCAN query, or "
                "set allow_select_scan=True\nopt allow_select_scan true")
        order_by = None
        if tree.order_by:
            order_by = tree.order_by[0]
        reverse = tree.order == 'DESC'
        if tree.order:
            if action == 'scan' and not tree.order_by:
                raise SyntaxError("No index found for query, "
                                  "cannot use ASC or DESC without "
                                  "ORDER BY <field>")
            if action == 'query':
                # Dynamo can only sort natively by the index's range key
                if order_by is None or order_by == index.range_key:
                    kwargs['desc'] = reverse
        kwargs.update(query_kwargs)
        # This is a special case for when we're querying an index and selecting
        # fields that aren't projected into the index.
        # We will change the query to only fetch the primary keys, and then
        # fill in the selected attributes after the fact.
        fetch_attrs_after = False
        if (index is not None and
                not index.projects_all_attributes(selection.all_fields)):
            kwargs['attributes'] = [visitor.get_field(a) for a in
                                    desc.primary_key_attributes]
            fetch_attrs_after = True
        else:
            kwargs['attributes'] = selection.build(visitor)
        kwargs['expr_values'] = visitor.expression_values
        kwargs['alias'] = visitor.attribute_names
        # e.g. connection.query2 / connection.scan2
        method = getattr(self.connection, action + '2')
        result = method(tablename, **kwargs)
        # If the queried index didn't project the selected attributes, we need
        # to do a BatchGetItem to fetch all the data.
        if fetch_attrs_after:
            if not isinstance(result, list):
                result = list(result)
            # If no results, no need to batch_get
            if not result:
                return result
            visitor = Visitor(self.reserved_words)
            kwargs = {
                'keys': [desc.primary_key(item) for item in result],
            }
            kwargs['attributes'] = selection.build(visitor)
            kwargs['alias'] = visitor.attribute_names
            result = self.connection.batch_get(tablename, **kwargs)
        def order(items):
            """ Sort the items by the specified keys """
            if order_by is None:
                return items
            # A query ordered by the index's range key is already sorted
            if index is None or order_by != index.range_key:
                if not isinstance(items, list):
                    items = list(items)
                items.sort(key=lambda x: x.get(order_by), reverse=reverse)
            return items
        # Save the data to a file
        if tree.save_file:
            if selection.is_count:
                raise Exception("Cannot use count(*) with SAVE")
            count = 0
            result = order(selection.convert(item, True) for item in result)
            filename = tree.save_file[0]
            # Strip quotes from a quoted filename literal
            if filename[0] in ['"', "'"]:
                filename = unwrap(filename)
            # If it's still an iterator, convert to a list so we can iterate
            # multiple times.
            if not isinstance(result, list):
                result = list(result)
            # NOTE(review): `os` and `gzip` are used here but not imported
            # at the top of this module -- confirm the imports exist.
            remainder, ext = os.path.splitext(filename)
            if ext.lower() in ['.gz', '.gzip']:
                # Inspect the extension underneath the compression suffix
                ext = os.path.splitext(remainder)[1]
                opened = gzip.open(filename, 'wb')
            else:
                opened = open(filename, 'wb')
            if ext.lower() == '.csv':
                if selection.all_keys:
                    headers = selection.all_keys
                else:
                    # Have to do this to get all the headers :(
                    result = list(result)
                    all_headers = set()
                    for item in result:
                        all_headers.update(item.keys())
                    headers = list(all_headers)
                with opened as ofile:
                    writer = csv.DictWriter(ofile, fieldnames=headers,
                                            extrasaction='ignore')
                    writer.writeheader()
                    for item in result:
                        count += 1
                        writer.writerow(item)
            elif ext.lower() == '.json':
                # One JSON object per line
                with opened as ofile:
                    for item in result:
                        count += 1
                        ofile.write(self._encoder.encode(item))
                        ofile.write('\n')
            else:
                # Fall back to a stream of pickled objects
                with opened as ofile:
                    for item in result:
                        count += 1
                        pickle.dump(item, ofile)
            return count
        elif not selection.is_count:
            result = order(selection.convert(item) for item in result)
        return result
def _scan(self, tree):
""" Run a SCAN statement """
return self._select(tree, True)
    def _query_and_op(self, tree, table, method_name, method_kwargs):
        """ Query the table and perform an operation on each item

        Used by UPDATE and DELETE: first find the primary keys of the
        matching items, then apply ``method_name`` to each key.  Returns
        the list of returned items (if any) or the count of affected items.

        """
        result = []
        if tree.keys_in:
            if tree.using:
                raise SyntaxError("Cannot use USING with KEYS IN")
            keys = self._iter_where_in(tree)
        else:
            visitor = Visitor(self.reserved_words)
            (action, kwargs, _) = self._build_query(table, tree, visitor)
            # Only fetch primary key attributes; that is all the follow-up
            # per-item operation needs
            attrs = [visitor.get_field(table.hash_key.name)]
            if table.range_key is not None:
                attrs.append(visitor.get_field(table.range_key.name))
            kwargs['attributes'] = attrs
            kwargs['expr_values'] = visitor.expression_values
            kwargs['alias'] = visitor.attribute_names
            # If there is no 'where' on this update/delete, check with the
            # caution_callback before proceeding.
            if visitor.expression_values is None and \
               callable(self.caution_callback) and \
               not self.caution_callback(method_name): # pylint: disable=E1102
                return False
            method = getattr(self.connection, action + '2')
            keys = method(table.name, **kwargs)
            if self._explaining:
                # During EXPLAIN, iterating raises ExplainSignal; substitute
                # a dummy key so the follow-up call is recorded too
                try:
                    list(keys)
                except ExplainSignal:
                    keys = [{}]
        method = getattr(self.connection, method_name + '2')
        count = 0
        for key in keys:
            try:
                ret = method(table.name, key, **method_kwargs)
            except CheckFailed:
                # Conditional check failed for this item; skip it
                continue
            count += 1
            if ret:
                result.append(ret)
        if result:
            return result
        else:
            return count
    def _delete(self, tree):
        """ Run a DELETE statement """
        tablename = tree.table
        table = self.describe(tablename, require=True)
        kwargs = {}
        visitor = Visitor(self.reserved_words)
        if tree.where:
            # The WHERE clause doubles as a condition on each DeleteItem
            constraints = ConstraintExpression.from_where(tree.where)
            kwargs['condition'] = constraints.build(visitor)
            kwargs['expr_values'] = visitor.expression_values
            kwargs['alias'] = visitor.attribute_names
        return self._query_and_op(tree, table, 'delete_item', kwargs)
    def _update(self, tree):
        """ Run an UPDATE statement """
        tablename = tree.table
        table = self.describe(tablename, require=True)
        kwargs = {}
        # RETURNS clause, e.g. ('ALL', 'NEW') -> 'ALL_NEW'
        if tree.returns:
            kwargs['returns'] = '_'.join(tree.returns)
        else:
            kwargs['returns'] = 'NONE'
        visitor = Visitor(self.reserved_words)
        updates = UpdateExpression.from_update(tree.update)
        kwargs['expression'] = updates.build(visitor)
        if tree.where:
            # The WHERE clause doubles as a condition on each UpdateItem
            constraints = ConstraintExpression.from_where(tree.where)
            kwargs['condition'] = constraints.build(visitor)
        kwargs['expr_values'] = visitor.expression_values
        kwargs['alias'] = visitor.attribute_names
        return self._query_and_op(tree, table, 'update_item', kwargs)
def _create(self, tree):
""" Run a SELECT statement """
tablename = tree.table
indexes = []
global_indexes = []
hash_key = None
range_key = None
attrs = {}
for declaration in tree.attrs:
name, type_ = declaration[:2]
if len(declaration) > 2:
index = declaration[2]
else:
index = None
if index is not None:
if index[0] == 'HASH':
field = hash_key = DynamoKey(name, data_type=TYPES[type_])
elif index[0] == 'RANGE':
field = range_key = DynamoKey(name, data_type=TYPES[type_])
else:
index_type = index[0]
kwargs = {}
if index_type[0] in ('ALL', 'INDEX'):
factory = LocalIndex.all
elif index_type[0] == 'KEYS':
factory = LocalIndex.keys
elif index_type[0] == 'INCLUDE':
factory = LocalIndex.include
kwargs['includes'] = [resolve(v) for v in index.include_vars]
index_name = resolve(index[1])
field = DynamoKey(name, data_type=TYPES[type_])
idx = factory(index_name, field, **kwargs)
indexes.append(idx)
else:
field = DynamoKey(name, data_type=TYPES[type_])
attrs[field.name] = field
for gindex in tree.global_indexes:
global_indexes.append(self._parse_global_index(gindex, attrs))
throughput = None
if tree.throughput:
throughput = Throughput(*map(resolve, tree.throughput))
try:
ret = self.connection.create_table(
tablename, hash_key, range_key, indexes=indexes,
global_indexes=global_indexes, throughput=throughput)
except DynamoDBError as e:
if e.kwargs['Code'] == 'ResourceInUseException' or tree.not_exists:
return False
raise
return True
def _parse_global_index(self, clause, attrs):
""" Parse a global index clause and return a GlobalIndex """
index_type, name = clause[:2]
name = resolve(name)
def get_key(field, data_type=None):
""" Get or set the DynamoKey from the field name """
if field in attrs:
key = attrs[field]
if data_type is not None:
if TYPES[data_type] != key.data_type:
raise SyntaxError(
"Key %r %s already declared with type %s" %
field, data_type, key.data_type)
else:
if data_type is None:
raise SyntaxError("Missing data type for %r" % field)
key = DynamoKey(field, data_type=TYPES[data_type])
attrs[field] = key
return key
g_hash_key = get_key(*clause.hash_key)
g_range_key = None
# For some reason I can't get the throughput section to have a name
# Use an index instead
tp_index = 3
if clause.range_key:
tp_index += 1
g_range_key = get_key(*clause.range_key)
if clause.include_vars:
tp_index += 1
kwargs = {}
if tp_index < len(clause):
throughput = clause[tp_index]
kwargs['throughput'] = Throughput(*map(resolve, throughput))
index_type = clause.index_type[0]
if index_type in ('ALL', 'INDEX'):
factory = GlobalIndex.all
elif index_type == 'KEYS':
factory = GlobalIndex.keys
elif index_type == 'INCLUDE':
factory = GlobalIndex.include
if not clause.include_vars:
raise SyntaxError("Include index %r missing include fields" %
name)
kwargs['includes'] = [resolve(v) for v in clause.include_vars]
return factory(name, g_hash_key, g_range_key, **kwargs)
def _insert(self, tree):
""" Run an INSERT statement """
tablename = tree.table
count = 0
kwargs = {}
batch = self.connection.batch_write(tablename, **kwargs)
with batch:
for item in iter_insert_items(tree):
batch.put(item)
count += 1
return count
def _drop(self, tree):
""" Run a DROP statement """
tablename = tree.table
kwargs = {}
try:
ret = self.connection.delete_table(tablename, **kwargs)
except DynamoDBError as e:
if e.kwargs['Code'] == 'ResourceNotFoundException' and tree.exists:
return False
raise
return True
    def _update_throughput(self, tablename, read, write, index):
        """ Update the throughput on a table or index

        A read/write value of '*' (or anything <= 0) keeps the current
        provisioned value.  Blocks until the table leaves the UPDATING
        state.

        """
        def get_desc():
            """ Get the table or global index description """
            desc = self.describe(tablename, refresh=True, require=True)
            if index is not None:
                return desc.global_indexes[index]
            return desc
        desc = get_desc()
        def num_or_star(value):
            """ Convert * to 0, otherwise resolve a number """
            return 0 if value == '*' else resolve(value)
        read = num_or_star(read)
        write = num_or_star(write)
        # 0 (or negative) means "keep the current value"
        if read <= 0:
            read = desc.read_throughput
        if write <= 0:
            write = desc.write_throughput
        throughput = Throughput(read, write)
        kwargs = {}
        if index:
            kwargs['global_indexes'] = {
                index: throughput,
            }
        else:
            kwargs['throughput'] = throughput
        self.connection.update_table(tablename, **kwargs)
        desc = get_desc()
        # NOTE(review): `time` is used here but not imported at the top of
        # this module -- confirm the import exists.
        while desc.status == 'UPDATING':  # pragma: no cover
            time.sleep(5)
            desc = get_desc()
    def _alter(self, tree):
        """ Run an ALTER statement """
        if tree.throughput:
            # ALTER TABLE ... SET THROUGHPUT (read, write) [on an index]
            [read, write] = tree.throughput
            index = None
            if tree.index:
                index = tree.index
            self._update_throughput(tree.table, read, write, index)
        elif tree.drop_index:
            # ALTER TABLE ... DROP INDEX <name> [IF EXISTS]
            updates = [IndexUpdate.delete(tree.drop_index[0])]
            try:
                self.connection.update_table(tree.table,
                                             index_updates=updates)
            except DynamoDBError as e:
                # Tolerate a missing index when IF EXISTS was given
                if tree.exists and e.kwargs['Code'] == 'ResourceNotFoundException':
                    pass
                else:
                    raise
        elif tree.create_index:
            # ALTER TABLE ... CREATE GLOBAL INDEX [IF NOT EXISTS]
            # GlobalIndex
            attrs = {}
            index = self._parse_global_index(tree.create_index, attrs)
            updates = [IndexUpdate.create(index)]
            try:
                self.connection.update_table(tree.table,
                                             index_updates=updates)
            except DynamoDBError as e:
                # Tolerate an existing index when IF NOT EXISTS was given
                if (tree.not_exists and
                        e.kwargs['Code'] == 'ValidationException' and
                        'already exists' in e.kwargs['Message']):
                    pass
                else:
                    raise
        else:
            raise SyntaxError("No alter command found")
def _dump(self, tree):
""" Run a DUMP statement """
schema = []
if tree.tables:
for table in tree.tables:
desc = self.describe(table, refresh=True, require=True)
schema.append(desc.schema)
else:
for table in self.describe_all():
schema.append(table.schema)
return '\n\n'.join(schema)
    def _load(self, tree):
        """ Run a LOAD statement

        Reads records from a .csv, .json (one object per line), or pickle
        file (optionally gzipped) and batch-writes them to the table.
        Returns the number of items written.

        """
        filename = tree.load_file[0]
        # Strip quotes from a quoted filename literal
        if filename[0] in ['"', "'"]:
            filename = unwrap(filename)
        # NOTE(review): `os` and `gzip` are used here but not imported at
        # the top of this module -- confirm the imports exist.
        if not os.path.exists(filename):
            raise Exception("No such file %r" % filename)
        batch = self.connection.batch_write(tree.table)
        count = 0
        with batch:
            remainder, ext = os.path.splitext(filename)
            if ext.lower() in ['.gz', '.gzip']:
                # Inspect the extension underneath the compression suffix
                ext = os.path.splitext(remainder)[1]
                opened = gzip.open(filename, 'rb')
            else:
                opened = open(filename, 'r')
            with opened as ifile:
                if ext.lower() == '.csv':
                    reader = csv.DictReader(ifile)
                    for row in reader:
                        batch.put(row)
                        count += 1
                elif ext.lower() == '.json':
                    # One JSON object per line
                    for row in ifile:
                        batch.put(json.loads(row))
                        count += 1
                else:
                    # Assume a stream of pickled objects
                    try:
                        while True:
                            batch.put(pickle.load(ifile))
                            count += 1
                    except EOFError:
                        pass
        return count
class FragmentEngine(Engine):
    """
    A DQL execution engine that accumulates query fragments until they
    form a complete statement, then runs it.
    """
    def __init__(self, connection=None):
        super(FragmentEngine, self).__init__(connection)
        self.fragments = ''
        self.last_query = ''

    @property
    def partial(self):
        """ True if there is a partial query stored """
        return len(self.fragments) > 0

    def reset(self):
        """ Clear any query fragments from the engine """
        self.fragments = ''

    def execute(self, fragment, pretty_format=True):
        """
        Run or aggregate a query fragment

        Concat the fragment to any stored fragments. If they form a complete
        query, run it and return the result. If not, store them and return
        None.
        """
        pending = (self.fragments + '\n' + fragment).lstrip()
        self.fragments = pending
        try:
            line_parser.parseString(pending)
        except ParseException:
            # Not a complete statement yet; keep accumulating
            return None
        self.last_query = pending.strip()
        self.fragments = ''
        return super(FragmentEngine, self).execute(self.last_query,
                                                   pretty_format)

    def pformat_exc(self, exc):
        """ Format an exception message for the last query's parse error """
        query = self.last_query
        # Bounds of the line containing the error location
        try:
            line_start = query.rindex('\n', 0, exc.loc) + 1
        except ValueError:
            line_start = 0
        try:
            line_end = query.index('\n', exc.loc)
        except ValueError:
            line_end = len(query)
        # Show the query up to the offending line, a caret under the
        # error column, then the exception text
        caret = ' ' * (exc.loc - line_start) + '^'
        return '\n'.join([query[:line_end], caret, str(exc)])
| {
"content_hash": "852996f73e4c3362c59c38740934f950",
"timestamp": "",
"source": "github",
"line_count": 989,
"max_line_length": 85,
"avg_line_length": 38.48432760364004,
"alnum_prop": 0.5267596752581383,
"repo_name": "mathcamp/dql",
"id": "df9074cb2eb47a1c50dc5ea49981c9817d727e43",
"size": "38061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dql/engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "247512"
},
{
"name": "Shell",
"bytes": "1328"
}
],
"symlink_target": ""
} |
"""
This code demonstrates how to use dedupe with a comma separated values
(CSV) file. All operations are performed in memory, so will run very
quickly on datasets up to ~10,000 rows.
We start with a CSV file containing our messy data. In this example,
it is listings of early childhood education centers in Chicago
compiled from several different sources.
The output will be a CSV with our clustered results.
"""
from future.builtins import next
import os
import csv
import re
import collections
import logging
import optparse
from numpy import nan
import dedupe
from unidecode import unidecode
# ## Logging
# Dedupe uses Python logging to show or suppress verbose output. Added for convenience.
# To enable verbose logging, run `python examples/csv_example/csv_example.py -v`
optp = optparse.OptionParser()
optp.add_option('-v', '--verbose', dest='verbose', action='count',
                help='Increase verbosity (specify multiple times for more)'
                )
(opts, args) = optp.parse_args()
# Default to WARNING; -v raises to INFO, -vv (or more) to DEBUG
log_level = logging.WARNING
if opts.verbose :
    if opts.verbose == 1:
        log_level = logging.INFO
    elif opts.verbose >= 2:
        log_level = logging.DEBUG
logging.getLogger().setLevel(log_level)
# ## Setup
# Input/output locations, plus where learned settings and labeled
# training data are persisted between runs
input_file = 'full_outfile.csv'
print("input file: ", input_file)
output_file = 'full_output.csv'
settings_file = 'settings/full_learned_settings'
training_file = 'settings/full_training.json'
def preProcess(column):
    """
    Do a little bit of data cleaning with the help of Unidecode and Regex.

    Things like casing, extra spaces, quotes and new lines can be ignored.
    Returns None when the value is empty after cleaning, so dedupe treats
    it as missing.
    """
    # Use the module-level `from unidecode import unidecode`; the previous
    # function-local `import unidecode` redundantly re-imported and
    # shadowed it on every call.
    column = unidecode(column)
    # Collapse runs of spaces and fold newlines into spaces (raw strings
    # keep the regexes unambiguous)
    column = re.sub(r' +', ' ', column)
    column = re.sub(r'\n', ' ', column)
    column = column.strip().strip('"').strip("'").lower().strip()
    if not column:
        column = None
    return column
def readData(filename):
    """
    Read in our data from a CSV file and create a dictionary of records,
    where the key is a unique record ID and each value is dict
    """
    data_d = {}
    with open(filename) as f:
        for row in csv.DictReader(f):
            # Clean every field before storing the record
            cleaned = {key: preProcess(value) for (key, value) in row.items()}
            data_d[int(row['Id'])] = cleaned
    return data_d
print('importing data ...')
data_d = readData(input_file)
# ## Training
if os.path.exists(settings_file):
    # Reuse previously learned settings and skip training entirely
    print('reading from', settings_file)
    with open(settings_file, 'rb') as f:
        deduper = dedupe.StaticDedupe(f)
else:
    # Define the fields dedupe will pay attention to
    #
    # Notice how we are telling dedupe to use a custom field comparator
    # for the 'Zip' field.
    fields = [
        {'field': 'City', 'type': 'String', 'has missing': True},
        {'field': 'Country', 'type': 'String', 'has missing': True},
    ]
    # Create a new deduper object and pass our data model to it.
    deduper = dedupe.Dedupe(fields)
    # To train dedupe, we feed it a sample of records.
    deduper.sample(data_d, 15000)
    # If we have training data saved from a previous run of dedupe,
    # look for it and load it in.
    # __Note:__ if you want to train from scratch, delete the training_file
    if os.path.exists(training_file):
        print('reading labeled examples from ', training_file)
        with open(training_file, 'rb') as f:
            deduper.readTraining(f)
    # ## Active learning
    # Dedupe will find the next pair of records
    # it is least certain about and ask you to label them as duplicates
    # or not.
    # use 'y', 'n' and 'u' keys to flag duplicates
    # press 'f' when you are finished
    print('starting active labeling...')
    dedupe.consoleLabel(deduper)
    deduper.train(ppc=0.7)
    # When finished, save our training away to disk
    with open(training_file, 'w') as tf :
        deduper.writeTraining(tf)
    # Save our weights and predicates to disk. If the settings file
    # exists, we will skip all the training and learning next time we run
    # this file.
    with open(settings_file, 'wb') as sf :
        deduper.writeSettings(sf)
# ## Blocking
print('blocking...')
# ## Clustering
# Find the threshold that will maximize a weighted average of our precision and recall.
# When we set the recall weight to 2, we are saying we care twice as much
# about recall as we do precision.
#
# If we had more data, we would not pass in all the blocked data into
# this function but a representative sample.
threshold = deduper.threshold(data_d, recall_weight=2)
# `match` will return sets of record IDs that dedupe
# believes are all referring to the same entity.
print('clustering...')
clustered_dupes = deduper.match(data_d, threshold)
print('# duplicate sets', len(clustered_dupes))
# ## Writing Results
# Write our original data back out to a CSV with a new column called
# 'Cluster ID' which indicates which records refer to each other.
print("finding canonical")
cluster_membership = {}
cluster_id = 0
for (cluster_id, cluster) in enumerate(clustered_dupes):
    id_set, scores = cluster
    cluster_d = [data_d[c] for c in id_set]
    canonical_rep = dedupe.canonicalize(cluster_d)
    for record_id, score in zip(id_set, scores):
        cluster_membership[record_id] = {
            "cluster id": cluster_id,
            "canonical representation": canonical_rep,
            "confidence": score
        }
# Records not in any cluster get ids numbered after the last cluster
singleton_id = cluster_id + 1
print("writing file...: ", output_file)
with open(output_file, 'w') as f_output:
    writer = csv.writer(f_output)
    with open(input_file) as f_input :
        reader = csv.reader(f_input)
        heading_row = next(reader)
        heading_row.insert(0, 'confidence_score')
        heading_row.insert(0, 'Cluster ID')
        # NOTE(review): canonical_rep here is whatever the final loop
        # iteration left behind; with zero clusters this raises NameError
        # -- confirm that is acceptable.
        canonical_keys = canonical_rep.keys()
        for key in canonical_keys:
            heading_row.append('canonical_' + key)
        writer.writerow(heading_row)
        for row in reader:
            row_id = int(row[0])
            if row_id in cluster_membership :
                cluster_id = cluster_membership[row_id]["cluster id"]
                canonical_rep = cluster_membership[row_id]["canonical representation"]
                row.insert(0, cluster_membership[row_id]['confidence'])
                row.insert(0, cluster_id)
                for key in canonical_keys:
                    row.append(canonical_rep[key])
            else:
                # Singletons: unique cluster id, no confidence, blank
                # canonical columns
                row.insert(0, None)
                row.insert(0, singleton_id)
                singleton_id += 1
                for key in canonical_keys:
                    row.append(None)
            writer.writerow(row)
| {
"content_hash": "4810cd709b6ed58212b618ae41900935",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 87,
"avg_line_length": 30.857142857142858,
"alnum_prop": 0.6474014336917563,
"repo_name": "davebshow/ten_million_books",
"id": "f6e30d946673cd64bc97c11875029f30df05197b",
"size": "6720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dedupe/csv_dedupe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4783282"
},
{
"name": "Python",
"bytes": "8201"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
    """Replace the permission set declared on SiteSettings' model options."""

    dependencies = [
        ("site", "0019_sitesettings_default_weight_unit"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="sitesettings",
            options={
                "permissions": (
                    ("manage_settings", "Manage settings."),
                    ("manage_translations", "Manage translations."),
                ),
            },
        ),
    ]
| {
"content_hash": "0336ccad4ffcf56b471ae4775d7fa58f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.5201698513800425,
"repo_name": "mociepka/saleor",
"id": "7f7abb200e0c70c7564e0c9302a14133fbfe3f69",
"size": "520",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "saleor/site/migrations/0020_auto_20190301_0336.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
import pytest
from plenum.test.helper import sdk_send_random_requests
from stp_core.loop.eventually import eventually
from plenum.common.messages.node_messages import Commit
from plenum.test.delayers import delay
from plenum.test.propagate.helper import recvdRequest, recvdPropagate, \
sentPropagate, recvdPrepareForInstId, recvdCommitForInstId
from plenum.test.test_node import TestNode
from plenum.test.node_request.helper import sdk_ensure_pool_functional
# Seconds to delay Commit messages to the lagged node in the `setup` fixture.
howlong = 20
# Snapshot of the lagged node's domain ledger size; written by `setup`,
# read by the test to verify exactly one transaction got ordered.
initial_ledger_size = 0
@pytest.fixture(scope="module")
def tconf(tconf):
    """Enable aggressive outdated-request cleanup for this module.

    Saves the four affected config knobs, overrides them so a request
    stuck in the ordering phase gets dropped quickly, and restores the
    original values on teardown.
    """
    overrides = {
        "OUTDATED_REQS_CHECK_ENABLED": True,
        "OUTDATED_REQS_CHECK_INTERVAL": 1,
        "PROPAGATES_PHASE_REQ_TIMEOUT": 3600,
        "ORDERING_PHASE_REQ_TIMEOUT": 10,
    }
    saved = {name: getattr(tconf, name) for name in overrides}
    for name, value in overrides.items():
        setattr(tconf, name, value)
    yield tconf
    for name, value in saved.items():
        setattr(tconf, name, value)
@pytest.fixture()
def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """Delay Commits towards one node, then send a single request.

    Node C is "lagged" by delaying Commit messages sent to it from the
    other three nodes; the pre-send ledger size is stashed in the
    module-level ``initial_ledger_size`` for later assertions.
    """
    global initial_ledger_size
    node_a, node_b, lagged_node, node_d = txnPoolNodeSet  # type: TestNode
    delay(Commit, frm=[node_a, node_b, node_d], to=lagged_node,
          howlong=howlong)
    initial_ledger_size = lagged_node.domainLedger.size
    return sdk_send_random_requests(
        looper, sdk_pool_handle, sdk_wallet_client, 1)
# NOTE: if this test fails intermittently then ORDERING_PHASE_REQ_TIMEOUT
# and Prepares and Commits wait timeouts should be tuned as we can not
# synchronize receiving of Prepares and dropping of the request, for now
# it is controlled just using timeouts so that we can drop the request
# before all Prepares received.
def test_req_drop_on_commit_phase_on_non_primary_and_then_ordered(
        tconf, setup, looper, txnPoolNodeSet,
        sdk_wallet_client, sdk_pool_handle):
    """A request dropped during the commit phase is still ordered later.

    Scenario: node C never receives Commits (delayed by `setup`), so its
    outdated-request cleanup drops the request object.  Once the delays
    are reset, the stashed Commits arrive and the request must still be
    ordered on every node, including C.
    """
    global initial_ledger_size
    A, B, C, D = txnPoolNodeSet  # type: TestNode
    lagged_node = C
    def check_propagates():
        # Node should have received a request from the client
        assert len(recvdRequest(lagged_node)) == 1
        # Node should have received a PROPAGATEs
        assert len(recvdPropagate(lagged_node)) == 3
        # Node should have sent a PROPAGATE
        assert len(sentPropagate(lagged_node)) == 1
        # Node should have one request in the requests queue
        assert len(lagged_node.requests) == 1
    # Must complete before the Commit delay (`howlong`) expires.
    timeout = howlong - 2
    looper.run(eventually(check_propagates, retryWait=.5, timeout=timeout))
    def check_prepares_received():
        # Node should have received all Prepares for master instance
        assert len(recvdPrepareForInstId(lagged_node, 0)) == 2
        assert len(lagged_node.requests) == 1
    looper.run(eventually(check_prepares_received, retryWait=.5, timeout=timeout))
    def check_drop():
        # Node should have not received Commits for master instance
        assert len(recvdCommitForInstId(lagged_node, 0)) == 0
        # Request object should be dropped by timeout
        assert len(lagged_node.requests) == 0
    # Wait just past the ordering-phase timeout plus one cleanup interval.
    timeout = tconf.ORDERING_PHASE_REQ_TIMEOUT + tconf.OUTDATED_REQS_CHECK_INTERVAL + 1
    looper.run(eventually(check_drop, retryWait=.5, timeout=timeout))
    # Release the stashed Commits on every node.
    for n in txnPoolNodeSet:
        n.nodeIbStasher.resetDelays()
    def check_commits_received():
        # Node should have received all delayed Commits for master instance
        assert len(recvdCommitForInstId(lagged_node, 0)) == 3
    timeout = howlong * 2
    looper.run(eventually(check_commits_received, retryWait=.5, timeout=timeout))
    def check_ledger_size():
        # The request should be eventually ordered
        for n in txnPoolNodeSet:
            assert n.domainLedger.size - initial_ledger_size == 1
    looper.run(eventually(check_ledger_size, retryWait=.5, timeout=timeout))
    # Pool must still be able to order fresh requests afterwards.
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
| {
"content_hash": "e54762a5317ba687502292e0803aaadc",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 90,
"avg_line_length": 40.532710280373834,
"alnum_prop": 0.7242333410191376,
"repo_name": "evernym/zeno",
"id": "36046866d2ec007bbf3ab09b7aabc09cf8580ad7",
"size": "4337",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/req_drop/test_req_drop_on_commit_phase_non_primary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "531061"
}
],
"symlink_target": ""
} |
"""Script for testing ganeti.tools.ssl_update"""
import unittest
import shutil
import tempfile
import os.path
import OpenSSL
import time
from ganeti import constants
from ganeti import errors
from ganeti import serializer
from ganeti import utils
from ganeti.tools import common
import testutils
class TestGenerateClientCert(unittest.TestCase):
  """Tests for common.GenerateClientCertificate."""

  def setUp(self):
    self.tmpdir = tempfile.mkdtemp()
    self.client_cert = os.path.join(self.tmpdir, "client.pem")
    self.server_cert = os.path.join(self.tmpdir, "server.pem")
    # A self-signed "server" certificate acts as the signing (CA) cert;
    # the current time is a sufficiently unique serial number.
    some_serial_no = int(time.time())
    utils.GenerateSelfSignedSslCert(self.server_cert, some_serial_no)

  def tearDown(self):
    shutil.rmtree(self.tmpdir)

  def testRegnerateClientCertificate(self):
    # NOTE(review): method name has a typo ("Regnerate"); still picked up
    # by unittest discovery thanks to the "test" prefix.
    my_node_name = "mynode.example.com"
    data = {constants.NDS_CLUSTER_NAME: "winnie_poohs_cluster",
            constants.NDS_NODE_DAEMON_CERTIFICATE: "some_cert",
            constants.NDS_NODE_NAME: my_node_name}
    common.GenerateClientCertificate(data, Exception,
                                     client_cert=self.client_cert,
                                     signing_cert=self.server_cert)
    client_cert_pem = utils.ReadFile(self.client_cert)
    server_cert_pem = utils.ReadFile(self.server_cert)
    client_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                  client_cert_pem)
    signing_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                   server_cert_pem)
    # New client cert must be issued by the signing cert and carry the
    # node name as its subject CN.
    self.assertEqual(client_cert.get_issuer().CN, signing_cert.get_subject().CN)
    self.assertEqual(client_cert.get_subject().CN, my_node_name)
class TestLoadData(unittest.TestCase):
  """Tests for common.LoadData."""

  def testNoJson(self):
    # Non-JSON input must be rejected with ParseError.
    self.assertRaises(errors.ParseError, common.LoadData, Exception, "")
    self.assertRaises(errors.ParseError, common.LoadData, Exception, "}")

  def testInvalidDataStructure(self):
    # Valid JSON whose shape does not match the expected structure.
    raw = serializer.DumpJson({
      "some other thing": False,
      })
    self.assertRaises(errors.ParseError, common.LoadData, Exception, raw)
    raw = serializer.DumpJson([])
    self.assertRaises(errors.ParseError, common.LoadData, Exception, raw)

  def testValidData(self):
    raw = serializer.DumpJson({})
    # NOTE(review): argument order here is (raw, Exception), while the
    # assertRaises calls above effectively invoke LoadData(Exception, raw).
    # One of the two orders is presumably wrong — confirm against the
    # signature of ganeti.tools.common.LoadData.
    self.assertEqual(common.LoadData(raw, Exception), {})
class TestVerifyClusterName(unittest.TestCase):
  """Tests for common.VerifyClusterName."""

  class MyException(Exception):
    pass

  def setUp(self):
    unittest.TestCase.setUp(self)
    self.tmpdir = tempfile.mkdtemp()

  def tearDown(self):
    unittest.TestCase.tearDown(self)
    shutil.rmtree(self.tmpdir)

  def testNoName(self):
    # A missing cluster name in the input dict must raise the given class.
    self.assertRaises(self.MyException, common.VerifyClusterName,
                      {}, self.MyException, "cluster_name",
                      _verify_fn=NotImplemented)

  @staticmethod
  def _FailingVerify(name):
    assert name == "cluster.example.com"
    raise errors.GenericError()

  def testFailingVerification(self):
    # Errors raised by the verification callback must propagate unchanged.
    data = {constants.SSHS_CLUSTER_NAME: "cluster.example.com"}
    self.assertRaises(errors.GenericError, common.VerifyClusterName,
                      data, self.MyException, "cluster_name",
                      _verify_fn=self._FailingVerify)
class TestVerifyCertificateStrong(testutils.GanetiTestCase):
  """Tests for common._VerifyCertificateStrong / VerifyCertificateStrong."""

  class MyException(Exception):
    pass

  def setUp(self):
    testutils.GanetiTestCase.setUp(self)
    self.tmpdir = tempfile.mkdtemp()

  def tearDown(self):
    testutils.GanetiTestCase.tearDown(self)
    shutil.rmtree(self.tmpdir)

  def testNoCert(self):
    # An input dict without a certificate entry must raise.
    self.assertRaises(self.MyException, common.VerifyCertificateStrong,
                      {}, self.MyException, _verify_fn=NotImplemented)

  def testVerificationSuccessWithCert(self):
    # With a certificate present, the supplied verify function is used.
    common.VerifyCertificateStrong({
      constants.NDS_NODE_DAEMON_CERTIFICATE: "something",
      }, self.MyException, _verify_fn=lambda x,y: None)

  def testNoPrivateKey(self):
    # cert1.pem carries a certificate but no key — strong check must fail.
    cert_filename = testutils.TestDataFilename("cert1.pem")
    cert_pem = utils.ReadFile(cert_filename)
    self.assertRaises(self.MyException,
                      common._VerifyCertificateStrong,
                      cert_pem, self.MyException, _check_fn=NotImplemented)

  def testInvalidCertificate(self):
    self.assertRaises(self.MyException,
                      common._VerifyCertificateStrong,
                      "Something that's not a certificate",
                      self.MyException,
                      _check_fn=NotImplemented)

  @staticmethod
  def _Check(cert):
    assert cert.get_subject()

  def testSuccessfulCheck(self):
    # cert2.pem presumably contains both cert and matching private key;
    # the returned PEM must load as each.
    cert_filename = testutils.TestDataFilename("cert2.pem")
    cert_pem = utils.ReadFile(cert_filename)
    result = \
      common._VerifyCertificateStrong(cert_pem, self.MyException,
                                      _check_fn=self._Check)
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, result)
    self.assertTrue(cert)
    key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, result)
    self.assertTrue(key)

  def testMismatchingKey(self):
    cert1_path = testutils.TestDataFilename("cert1.pem")
    cert2_path = testutils.TestDataFilename("cert2.pem")
    # Extract certificate
    cert1 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                            utils.ReadFile(cert1_path))
    cert1_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                cert1)
    # Extract mismatching key
    key2 = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                          utils.ReadFile(cert2_path))
    key2_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                              key2)
    try:
      # NOTE: the except clause below is Python 2 syntax; this file
      # targets Python 2.
      common._VerifyCertificateStrong(cert1_pem + key2_pem, self.MyException,
                                      _check_fn=NotImplemented)
    except self.MyException, err:
      self.assertTrue("not signed with given key" in str(err))
    else:
      self.fail("Exception was not raised")
if __name__ == "__main__":
  # Run the test suite via ganeti's wrapper around unittest.main().
  testutils.GanetiTestProgram()
| {
"content_hash": "7ffbf55d7bcf56fb6158c4781a79e42c",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 80,
"avg_line_length": 33.08064516129032,
"alnum_prop": 0.6567528035104827,
"repo_name": "onponomarev/ganeti",
"id": "0eb7e451bf311f7e11bf332b90cac21d88a06188",
"size": "7511",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/py/ganeti.tools.common_unittest.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2639381"
},
{
"name": "Python",
"bytes": "5967379"
},
{
"name": "Shell",
"bytes": "118007"
}
],
"symlink_target": ""
} |
r"""Saves out a GraphDef containing the architecture of the model.
To use it, run something like this, with a model name defined by slim:
bazel build tensorflow_models/slim:export_inference_graph
bazel-bin/tensorflow_models/slim/export_inference_graph \
--model_name=inception_v3 --output_file=/tmp/inception_v3_inf_graph.pb
If you then want to use the resulting model with your own or pretrained
checkpoints as part of a mobile model, you can run freeze_graph to get a graph
def with the variables inlined as constants using:
bazel build tensorflow/python/tools:freeze_graph
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=/tmp/inception_v3_inf_graph.pb \
--input_checkpoint=/tmp/checkpoints/inception_v3.ckpt \
--input_binary=true --output_graph=/tmp/frozen_inception_v3.pb \
--output_node_names=InceptionV3/Predictions/Reshape_1
The output node names will vary depending on the model, but you can inspect and
estimate them using the summarize_graph tool:
bazel build tensorflow/tools/graph_transforms:summarize_graph
bazel-bin/tensorflow/tools/graph_transforms/summarize_graph \
--in_graph=/tmp/inception_v3_inf_graph.pb
To run the resulting graph in C++, you can look at the label_image sample code:
bazel build tensorflow/examples/label_image:label_image
bazel-bin/tensorflow/examples/label_image/label_image \
--image=${HOME}/Pictures/flowers.jpg \
--input_layer=input \
--output_layer=InceptionV3/Predictions/Reshape_1 \
--graph=/tmp/frozen_inception_v3.pb \
--labels=/tmp/imagenet_slim_labels.txt \
--input_mean=0 \
--input_std=255
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.platform import gfile
from datasets import dataset_factory
from nets import nets_factory
# Alias for TF-Slim; not referenced in the portion of this file shown here.
slim = tf.contrib.slim
# Command-line flags controlling which architecture is exported and where
# the GraphDef is written.
tf.app.flags.DEFINE_string(
    'model_name', 'inception_v3', 'The name of the architecture to save.')
tf.app.flags.DEFINE_boolean(
    'is_training', False,
    'Whether to save out a training-focused version of the model.')
tf.app.flags.DEFINE_integer(
    'image_size', None,
    'The image size to use, otherwise use the model default_image_size.')
tf.app.flags.DEFINE_string('dataset_name', 'imagenet',
                           'The name of the dataset to use with the model.')
tf.app.flags.DEFINE_integer(
    'labels_offset', 0,
    'An offset for the labels in the dataset. This flag is primarily used to '
    'evaluate the VGG and ResNet architectures which do not use a background '
    'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
    'output_file', '', 'Where to save the resulting file to.')
tf.app.flags.DEFINE_string(
    'dataset_dir', '', 'Directory to save intermediate dataset files to')
FLAGS = tf.app.flags.FLAGS
def main(_):
  """Build the requested network's graph and serialize its GraphDef.

  Raises:
    ValueError: if --output_file was not supplied.
  """
  if not FLAGS.output_file:
    raise ValueError('You must supply the path to save to with --output_file')
  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default() as graph:
    # The dataset is only consulted for its class count.
    train_split = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
                                              FLAGS.dataset_dir)
    num_classes = train_split.num_classes - FLAGS.labels_offset
    build_network = nets_factory.get_network_fn(FLAGS.model_name,
                                                num_classes=num_classes,
                                                is_training=FLAGS.is_training)
    side = FLAGS.image_size or build_network.default_image_size
    # Single-image placeholder; running the net populates the graph.
    inputs = tf.placeholder(name='input', dtype=tf.float32,
                            shape=[1, side, side, 3])
    build_network(inputs)
    graph_def = graph.as_graph_def()
    with gfile.GFile(FLAGS.output_file, 'wb') as f:
      f.write(graph_def.SerializeToString())
if __name__ == '__main__':
  # tf.app.run() parses the flags defined above and then invokes main().
  tf.app.run()
| {
"content_hash": "8e782de0ea901546b3d6e21285218c5e",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 36.18446601941748,
"alnum_prop": 0.7182720686879528,
"repo_name": "erko/nips17-attack",
"id": "da42fb9609bf1cd9cb73ebd0e16fde63c9aa36c6",
"size": "4412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slim/export_inference_graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "46257"
},
{
"name": "Python",
"bytes": "529363"
},
{
"name": "Shell",
"bytes": "20417"
}
],
"symlink_target": ""
} |
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
    """A single alien in the fleet, drifting horizontally across the screen."""

    def __init__(self, ai_settings, screen):
        """Create an alien near the screen's top-left corner.

        Args:
            ai_settings: settings object providing ``alien_speed_factor``
                and ``alien_fleet_direct`` (presumably +1/-1 for
                right/left — confirm against the settings module).
            screen: pygame display surface the alien is drawn on.
        """
        super(Alien, self).__init__()
        self.screen = screen
        self.ai_settings = ai_settings
        # Load the alien image and start one alien-width/height in from
        # the top-left corner.
        self.image = pygame.image.load('images/alien.bmp')
        self.rect = self.image.get_rect()
        self.rect.x = self.rect.width
        self.rect.y = self.rect.height
        # Track horizontal position as a float for sub-pixel movement.
        self.x = float(self.rect.x)

    def blitme(self):
        """Draw the alien at its current rect position."""
        self.screen.blit(self.image, self.rect)

    def update(self):
        """Advance horizontally by fleet speed times fleet direction."""
        self.x += (self.ai_settings.alien_speed_factor *
                   self.ai_settings.alien_fleet_direct)
        self.rect.x = self.x

    def check_edges(self):
        """Return True if the alien touches either screen edge.

        Fix: the original returned True at an edge but fell through to an
        implicit None otherwise; an explicit bool is returned now, which
        is backward compatible for the truthiness checks callers use.
        """
        screen_rect = self.screen.get_rect()
        return (self.rect.right >= screen_rect.right or
                self.rect.left <= screen_rect.left)
| {
"content_hash": "cb1be60c94ced5f20061c5c0f457279a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 93,
"avg_line_length": 25.823529411764707,
"alnum_prop": 0.6002277904328018,
"repo_name": "waiilaiiz/python_practice",
"id": "b31a234cf49b43e6f8df3621e6995f02c407e61a",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ship_alien/alien.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6957"
},
{
"name": "Python",
"bytes": "25632"
},
{
"name": "Shell",
"bytes": "3718"
}
],
"symlink_target": ""
} |
from thrift.Thrift import TType, TMessageType, TException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
# Appears to be Thrift-generated module boilerplate: prefer the
# C-accelerated serializer when the native extension is importable.
try:
  from thrift.protocol import fastbinary
except:
  # Bare except kept as-is (generated code); any failure falls back to
  # the pure-Python protocol by leaving fastbinary unset.
  fastbinary = None
| {
"content_hash": "5fa051afa6dc33a22f1d1f4d4b7f5d36",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 23,
"alnum_prop": 0.8217391304347826,
"repo_name": "jacksonicson/paper.IS2015",
"id": "596ed8f04dcc11b04887daaec5b29dce00b70cd3",
"size": "379",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "control/Control/generated/relay/ttypes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "611"
},
{
"name": "C++",
"bytes": "25818"
},
{
"name": "Python",
"bytes": "1465500"
},
{
"name": "R",
"bytes": "35368"
},
{
"name": "Rebol",
"bytes": "1221"
},
{
"name": "Shell",
"bytes": "5715"
},
{
"name": "Thrift",
"bytes": "1346"
}
],
"symlink_target": ""
} |
"""Generic presubmit checks that can be reused by other presubmit checks."""
import os as _os
# Absolute directory containing this script, resolved once at import time;
# not referenced in the portion of the file visible here.
_HERE = _os.path.dirname(_os.path.abspath(__file__))
### Description checks
def CheckChangeHasTestField(input_api, output_api):
  """Notify the author when the changelist lacks a TEST= field."""
  if not input_api.change.TEST:
    return [output_api.PresubmitNotifyResult(
        'If this change requires manual test instructions to QA team, add '
        'TEST=[instructions].')]
  return []
def CheckChangeHasBugField(input_api, output_api):
  """Notify the author when the changelist lacks a BUG= field."""
  if not input_api.change.BUG:
    return [output_api.PresubmitNotifyResult(
        'If this change has an associated bug, add BUG=[bug number].')]
  return []
def CheckChangeHasTestedField(input_api, output_api):
  """Fail the presubmit when the changelist lacks a TESTED= field."""
  if not input_api.change.TESTED:
    return [output_api.PresubmitError('Changelist must have a TESTED= field.')]
  return []
def CheckChangeHasQaField(input_api, output_api):
  """Fail the presubmit when the changelist lacks a QA= field."""
  if not input_api.change.QA:
    return [output_api.PresubmitError('Changelist must have a QA= field.')]
  return []
def CheckDoNotSubmitInDescription(input_api, output_api):
  """Error out if the forbidden marker appears in the CL description.

  The marker literal is split in two so this file never trips its own
  check.
  """
  marker = 'DO NOT ''SUBMIT'
  description = input_api.change.DescriptionText()
  if marker not in description:
    return []
  return [output_api.PresubmitError(
      marker + ' is present in the changelist description.')]
def CheckChangeHasDescription(input_api, output_api):
  """Flag an empty CL description: an error on commit, a notice on upload."""
  if input_api.change.DescriptionText().strip():
    return []
  result_type = (output_api.PresubmitError if input_api.is_committing
                 else output_api.PresubmitNotifyResult)
  return [result_type('Add a description to the CL.')]
def CheckChangeWasUploaded(input_api, output_api):
  """On commit, require the change to have an associated uploaded issue."""
  missing_issue = input_api.is_committing and not input_api.change.issue
  if not missing_issue:
    return []
  return [output_api.PresubmitError(
      'Issue wasn\'t uploaded. Please upload first.')]
### Content checks
def CheckDoNotSubmitInFiles(input_api, output_api):
  """Error out if the forbidden marker was added to any text file.

  The marker literal is split in two so this file never trips its own
  check.
  """
  marker = 'DO NOT ''SUBMIT'

  def line_is_clean(_, line):
    return marker not in line

  # Check every text file, not just source files.
  violations = _FindNewViolationsOfRule(line_is_clean, input_api,
                                        lambda x: x)
  if not violations:
    return []
  text = '\n'.join('Found %s in %s' % (marker, loc) for loc in violations)
  return [output_api.PresubmitError(text)]
def CheckChangeLintsClean(input_api, output_api, source_file_filter=None):
  """Checks that all '.cc' and '.h' files pass cpplint.py.

  Test files are linted at a more permissive verbosity level than
  regular sources.  Mutates cpplint's module-global error state.
  """
  # NOTE(review): the '.' before (cc|h) is unescaped, so this also matches
  # e.g. 'testsXcc' — presumably meant to be r'.*tests?\.(cc|h)$'.
  _RE_IS_TEST = input_api.re.compile(r'.*tests?.(cc|h)$')
  result = []
  cpplint = input_api.cpplint
  # Access to a protected member _XX of a client class
  # pylint: disable=W0212
  cpplint._cpplint_state.ResetErrorCounts()
  # Justifications for each filter:
  #
  # - build/include : Too many; fix in the future.
  # - build/include_order : Not happening; #ifdefed includes.
  # - build/namespace : I'm surprised by how often we violate this rule.
  # - readability/casting : Mistakes a whole bunch of function pointer.
  # - runtime/int : Can be fixed long term; volume of errors too high
  # - runtime/virtual : Broken now, but can be fixed in the future?
  # - whitespace/braces : We have a lot of explicit scoping in chrome code.
  # - readability/inheritance : Temporary, while the OVERRIDE and FINAL fixup
  # is in progress.
  cpplint._SetFilters('-build/include,-build/include_order,-build/namespace,'
                      '-readability/casting,-runtime/int,-runtime/virtual,'
                      '-whitespace/braces,-readability/inheritance')
  # We currently are more strict with normal code than unit tests; 4 and 5 are
  # the verbosity level that would normally be passed to cpplint.py through
  # --verbose=#. Hopefully, in the future, we can be more verbose.
  files = [f.AbsoluteLocalPath() for f in
           input_api.AffectedSourceFiles(source_file_filter)]
  for file_name in files:
    if _RE_IS_TEST.match(file_name):
      level = 5
    else:
      level = 4
    cpplint.ProcessFile(file_name, level)
  # Errors are accumulated in cpplint's global state across all files.
  if cpplint._cpplint_state.error_count > 0:
    if input_api.is_committing:
      res_type = output_api.PresubmitError
    else:
      res_type = output_api.PresubmitPromptWarning
    result = [res_type('Changelist failed cpplint.py check.')]
  return result
def CheckChangeHasNoCR(input_api, output_api, source_file_filter=None):
  """Warn about source files containing CR (carriage return) characters."""
  offenders = [f.LocalPath()
               for f in input_api.AffectedSourceFiles(source_file_filter)
               if '\r' in input_api.ReadFile(f, 'rb')]
  if not offenders:
    return []
  return [output_api.PresubmitPromptWarning(
      'Found a CR character in these files:', items=offenders)]
def CheckSvnModifiedDirectories(input_api, output_api, source_file_filter=None):
  """Checks for files in svn modified directories.

  They will get submitted on accident because svn commits recursively by
  default, and that's very dangerous.
  """
  # Only meaningful for svn checkouts.
  if input_api.change.scm != 'svn':
    return []
  errors = []
  current_cl_files = input_api.change.GetModifiedFiles()
  all_modified_files = input_api.change.GetAllModifiedFiles()
  # Filter out files in the current CL.
  modified_files = [f for f in all_modified_files if f not in current_cl_files]
  modified_abspaths = [input_api.os_path.abspath(f) for f in modified_files]
  for f in input_api.AffectedFiles(file_filter=source_file_filter):
    # Only directories marked Modified are dangerous here.
    if f.Action() == 'M' and f.IsDirectory():
      curpath = f.AbsoluteLocalPath()
      bad_files = []
      # Check if any of the modified files in other CLs are under curpath.
      # NOTE: xrange — this file targets Python 2.
      for i in xrange(len(modified_files)):
        abspath = modified_abspaths[i]
        if input_api.os_path.commonprefix([curpath, abspath]) == curpath:
          bad_files.append(modified_files[i])
      if bad_files:
        if input_api.is_committing:
          error_type = output_api.PresubmitPromptWarning
        else:
          error_type = output_api.PresubmitNotifyResult
        errors.append(error_type(
            'Potential accidental commits in changelist %s:' % f.LocalPath(),
            items=bad_files))
  return errors
def CheckChangeHasOnlyOneEol(input_api, output_api, source_file_filter=None):
  """Warn about files that do not end with exactly one newline (LF)."""
  offenders = []
  for src in input_api.AffectedSourceFiles(source_file_filter):
    contents = input_api.ReadFile(src, 'rb')
    # Files of length <= 1 are ignored, matching historical behavior.
    bad_ending = contents[-1:] != '\n' or contents[-2:-1] == '\n'
    if len(contents) > 1 and bad_ending:
      offenders.append(src.LocalPath())
  if not offenders:
    return []
  return [output_api.PresubmitPromptWarning(
      'These files should end in one (and only one) newline character:',
      items=offenders)]
def CheckChangeHasNoCrAndHasOnlyOneEol(input_api, output_api,
                                       source_file_filter=None):
  """Runs both CheckChangeHasNoCR and CheckChangeHasOnlyOneEOL in one pass.

  Reading each affected file only once makes this faster than running
  the two checks separately.
  """
  cr_offenders = []
  eol_offenders = []
  for src in input_api.AffectedSourceFiles(source_file_filter):
    contents = input_api.ReadFile(src, 'rb')
    if '\r' in contents:
      cr_offenders.append(src.LocalPath())
    # Must end in one and only one newline; length <= 1 is ignored.
    if len(contents) > 1 and (contents[-1:] != '\n' or
                              contents[-2:-1] == '\n'):
      eol_offenders.append(src.LocalPath())
  outputs = []
  if cr_offenders:
    outputs.append(output_api.PresubmitPromptWarning(
        'Found a CR character in these files:', items=cr_offenders))
  if eol_offenders:
    outputs.append(output_api.PresubmitPromptWarning(
        'These files should end in one (and only one) newline character:',
        items=eol_offenders))
  return outputs
def _ReportErrorFileAndLine(filename, line_num, dummy_line):
"""Default error formatter for _FindNewViolationsOfRule."""
return '%s:%s' % (filename, line_num)
def _FindNewViolationsOfRule(callable_rule, input_api, source_file_filter=None,
                             error_formatter=_ReportErrorFileAndLine):
  """Find all newly introduced violations of a per-line rule (a callable).

  Arguments:
    callable_rule: a callable taking a file extension and line of input and
      returning True if the rule is satisfied and False if there was a problem.
    input_api: object to enumerate the affected files.
    source_file_filter: a filter to be passed to the input api.
    error_formatter: a callable taking (filename, line_number, line) and
      returning a formatted error string.

  Returns:
    A list of the newly-introduced violations reported by the rule.
  """
  errors = []
  for f in input_api.AffectedFiles(include_deletes=False,
                                   file_filter=source_file_filter):
    # For speed, we do two passes, checking first the full file.  Shelling out
    # to the SCM to determine the changed region can be quite expensive on
    # Win32.  Assuming that most files will be kept problem-free, we can
    # skip the SCM operations most of the time.
    extension = str(f.LocalPath()).rsplit('.', 1)[-1]
    if all(callable_rule(extension, line) for line in f.NewContents()):
      continue  # No violation found in full text: can skip considering diff.
    # Second pass: only lines actually touched by this change are reported.
    for line_num, line in f.ChangedContents():
      if not callable_rule(extension, line):
        errors.append(error_formatter(f.LocalPath(), line_num, line))
  return errors
def CheckChangeHasNoTabs(input_api, output_api, source_file_filter=None):
  """Warn about new tab characters in text files (makefiles excluded)."""
  base_filter = source_file_filter or input_api.FilterSourceFile

  def non_makefile_filter(affected_file):
    # Makefiles legitimately require tabs, so skip them entirely.
    name = input_api.os_path.basename(affected_file.LocalPath())
    is_makefile = name in ('Makefile', 'makefile') or name.endswith('.mk')
    return not is_makefile and base_filter(affected_file)

  tab_lines = _FindNewViolationsOfRule(lambda _, line: '\t' not in line,
                                       input_api, non_makefile_filter)
  if not tab_lines:
    return []
  return [output_api.PresubmitPromptWarning('Found a tab character in:',
                                            long_text='\n'.join(tab_lines))]
def CheckChangeTodoHasOwner(input_api, output_api, source_file_filter=None):
  """Checks that the user didn't add TODO(name) without an owner.

  The pattern literal is split so this file's own check does not flag it.
  """
  unowned_todo = input_api.re.compile('TO''DO[^(]')

  def has_owner(_, line):
    return not unowned_todo.search(line)

  findings = ['Found TO''DO with no owner in ' + loc
              for loc in _FindNewViolationsOfRule(has_owner, input_api,
                                                  source_file_filter)]
  if not findings:
    return []
  return [output_api.PresubmitPromptWarning('\n'.join(findings))]
def CheckChangeHasNoStrayWhitespace(input_api, output_api,
                                    source_file_filter=None):
  """Checks that there is no stray whitespace at source lines end."""
  def no_trailing_ws(_, line):
    return line.rstrip() == line

  findings = _FindNewViolationsOfRule(no_trailing_ws, input_api,
                                      source_file_filter)
  if not findings:
    return []
  return [output_api.PresubmitPromptWarning(
      'Found line ending with white spaces in:',
      long_text='\n'.join(findings))]
def CheckLongLines(input_api, output_api, maxlen, source_file_filter=None):
  """Checks that there aren't any lines longer than maxlen characters in any of
  the text files to be submitted.

  Java and Android makefiles get their own limits; lines containing URLs,
  grit includes, css url() references, or one very long "symbol" are
  tolerated up to a hard limit of 1.5x the file's maximum.
  """
  maxlens = {
      'java': 100,
      # This is specifically for Android's handwritten makefiles (Android.mk).
      'mk': 200,
      '': maxlen,
  }
  # Language specific exceptions to max line length.
  # '.h' is considered an obj-c file extension, since OBJC_EXCEPTIONS are a
  # superset of CPP_EXCEPTIONS.
  CPP_FILE_EXTS = ('c', 'cc')
  CPP_EXCEPTIONS = ('#define', '#endif', '#if', '#include', '#pragma')
  JAVA_FILE_EXTS = ('java',)
  JAVA_EXCEPTIONS = ('import ', 'package ')
  OBJC_FILE_EXTS = ('h', 'm', 'mm')
  OBJC_EXCEPTIONS = ('#define', '#endif', '#if', '#import', '#include',
                     '#pragma')
  LANGUAGE_EXCEPTIONS = [
      (CPP_FILE_EXTS, CPP_EXCEPTIONS),
      (JAVA_FILE_EXTS, JAVA_EXCEPTIONS),
      (OBJC_FILE_EXTS, OBJC_EXCEPTIONS),
  ]
  def no_long_lines(file_extension, line):
    # Check for language specific exceptions.
    if any(file_extension in exts and line.startswith(exceptions)
           for exts, exceptions in LANGUAGE_EXCEPTIONS):
      return True
    file_maxlen = maxlens.get(file_extension, maxlens[''])
    # Stupidly long symbols that needs to be worked around if takes 66% of line.
    # NOTE: integer division under Python 2 (this file's target); under
    # Python 3 these would become floats.
    long_symbol = file_maxlen * 2 / 3
    # Hard line length limit at 50% more.
    extra_maxlen = file_maxlen * 3 / 2
    line_len = len(line)
    if line_len <= file_maxlen:
      return True
    if line_len > extra_maxlen:
      return False
    if any((url in line) for url in ('file://', 'http://', 'https://')):
      return True
    if 'url(' in line and file_extension == 'css':
      return True
    if '<include' in line and file_extension in ('css', 'html', 'js'):
      return True
    # Returns a truthy match object (or None) rather than a strict bool.
    return input_api.re.match(
        r'.*[A-Za-z][A-Za-z_0-9]{%d,}.*' % long_symbol, line)
  def format_error(filename, line_num, line):
    return '%s, line %s, %s chars' % (filename, line_num, len(line))
  errors = _FindNewViolationsOfRule(no_long_lines, input_api,
                                    source_file_filter,
                                    error_formatter=format_error)
  if errors:
    msg = 'Found lines longer than %s characters (first 5 shown).' % maxlen
    return [output_api.PresubmitPromptWarning(msg, items=errors[:5])]
  else:
    return []
def CheckLicense(input_api, output_api, license_re, source_file_filter=None,
                 accept_empty_files=True):
  """Verifies the license header.

  Files whose contents do not match license_re are reported; empty files
  are skipped when accept_empty_files is true.
  """
  pattern = input_api.re.compile(license_re, input_api.re.MULTILINE)
  offenders = []
  for src in input_api.AffectedSourceFiles(source_file_filter):
    contents = input_api.ReadFile(src, 'rb')
    if not contents and accept_empty_files:
      continue
    if pattern.search(contents) is None:
      offenders.append(src.LocalPath())
  if not offenders:
    return []
  # Hard warning only when committing; a notification suffices on upload.
  res_type = (output_api.PresubmitPromptWarning if input_api.is_committing
              else output_api.PresubmitNotifyResult)
  return [res_type(
      'License must match:\n%s\n' % pattern.pattern +
      'Found a bad license header in these files:', items=offenders)]
def CheckChangeSvnEolStyle(input_api, output_api, source_file_filter=None):
  """Checks that the source files have svn:eol-style=LF."""
  affected = input_api.AffectedSourceFiles(source_file_filter)
  return CheckSvnProperty(input_api, output_api, 'svn:eol-style', 'LF',
                          affected)
def CheckSvnForCommonMimeTypes(input_api, output_api):
  """Checks that common binary file types have the correct svn:mime-type."""
  files = input_api.AffectedFiles(include_deletes=False)

  def files_with_ext(extensions):
    # All affected files whose path ends with one of the given extensions.
    return [f for f in files
            if any(f.LocalPath().endswith(ext) for ext in extensions)]

  checks = [
      ('application/pdf', ['.pdf']),
      ('image/bmp', ['.bmp']),
      ('image/gif', ['.gif']),
      ('image/png', ['.png']),
      ('image/jpeg', ['.jpg', '.jpeg', '.jpe']),
      ('image/vnd.microsoft.icon', ['.ico']),
  ]
  output = []
  for mime_type, extensions in checks:
    output.extend(CheckSvnProperty(input_api, output_api, 'svn:mime-type',
                                   mime_type, files_with_ext(extensions)))
  return output
def CheckSvnProperty(input_api, output_api, prop, expected, affected_files):
  """Verifies each of |affected_files| has svn property |prop| == |expected|.

  A no-op for non-svn checkouts. Produces an error at commit time, a
  notification at upload time.
  """
  if input_api.change.scm != 'svn':
    return []
  offenders = [f for f in affected_files if f.Property(prop) != expected]
  if not offenders:
    return []
  if input_api.is_committing:
    result_factory = output_api.PresubmitError
  else:
    result_factory = output_api.PresubmitNotifyResult
  fix_hint = 'Run the command: svn pset %s %s \\' % (prop, expected)
  return [result_factory(fix_hint, items=offenders)]
### Other checks
def CheckDoNotSubmit(input_api, output_api):
  """Runs both DO NOT SUBMIT checks: change description and file contents."""
  results = CheckDoNotSubmitInDescription(input_api, output_api)
  results += CheckDoNotSubmitInFiles(input_api, output_api)
  return results
def CheckTreeIsOpen(input_api, output_api,
                    url=None, closed=None, json_url=None):
  """Blocks a commit (never an upload) when the tree status says closed.

  Two styles are supported:
    * json_url: fetch a JSON blob and honor its 'can_commit_freely' flag
      (preferred).
    * url + closed: fetch plain text and treat a match of the |closed|
      regexp as a closed tree (legacy).

  Network failures produce an error rather than silently passing.
  """
  if not input_api.is_committing:
    return []

  def fetch(target):
    # Read the whole status document, then release the connection.
    connection = input_api.urllib2.urlopen(target)
    payload = connection.read()
    connection.close()
    return payload

  try:
    if json_url:
      status = input_api.json.loads(fetch(json_url))
      if status['can_commit_freely']:
        return []
      return [output_api.PresubmitError(
          'Tree state is: ' + status['general_state'],
          long_text=status['message'] + '\n' + json_url)]
    # TODO(bradnelson): drop this once all users are gone.
    raw = fetch(url)
    if input_api.re.match(closed, raw):
      return [output_api.PresubmitError(
          'The tree is closed.', long_text=raw + '\n' + url)]
  except IOError as e:
    return [output_api.PresubmitError('Error fetching tree status.',
                                      long_text=str(e))]
  return []
def GetUnitTestsInDirectory(
    input_api, output_api, directory, whitelist=None, blacklist=None, env=None):
  """Builds test commands for every file in |directory| (non-recursive).

  Thin wrapper for GetUnitTests; |whitelist| and |blacklist| are lists of
  regexps matched against the bare file name.
  """
  test_path = input_api.os_path.abspath(
      input_api.os_path.join(input_api.PresubmitLocalPath(), directory))

  def matches(filename, patterns):
    return any(input_api.re.match(p, filename) for p in patterns)

  unit_tests = []
  found = 0
  for filename in input_api.os_listdir(test_path):
    found += 1
    if not input_api.os_path.isfile(
        input_api.os_path.join(test_path, filename)):
      continue
    if whitelist and not matches(filename, whitelist):
      continue
    if blacklist and matches(filename, blacklist):
      continue
    unit_tests.append(input_api.os_path.join(directory, filename))
  input_api.logging.debug(
      'Found %d files, running %d' % (found, len(unit_tests)))
  if not unit_tests:
    return [
        output_api.PresubmitPromptWarning(
          'Out of %d files, found none that matched w=%r, b=%r in directory %s'
          % (found, whitelist, blacklist, directory))
    ]
  return GetUnitTests(input_api, output_api, unit_tests, env)
def GetUnitTests(input_api, output_api, unit_tests, env=None):
  """Builds one Command per entry of |unit_tests|.

  On Windows, '.py' tests are prefixed with the python interpreter since the
  OS does not honor shebang lines. Failures are hard errors at commit time
  but only prompt warnings at upload time, so incomplete patches can still
  be uploaded.
  """
  if input_api.is_committing:
    message_type = output_api.PresubmitError
  else:
    message_type = output_api.PresubmitPromptWarning
  commands = []
  for unit_test in unit_tests:
    cmd = []
    if input_api.platform == 'win32' and unit_test.endswith('.py'):
      # Windows needs some help.
      cmd.append(input_api.python_executable)
    cmd.append(unit_test)
    if input_api.verbose:
      cmd.append('--verbose')
    # Build a fresh kwargs dict per command so consumers may mutate freely.
    kwargs = {'cwd': input_api.PresubmitLocalPath()}
    if env:
      kwargs['env'] = env
    commands.append(input_api.Command(
        name=unit_test, cmd=cmd, kwargs=kwargs, message=message_type))
  return commands
def GetUnitTestsRecursively(input_api, output_api, directory,
                            whitelist, blacklist):
  """Builds test commands for every whitelisted file under |directory|.

  Recursive, but restricted to files inside the change's own source repo —
  never dependencies. |whitelist|/|blacklist| are regexp lists matched
  against the file path.
  """
  def wanted(filepath):
    if not any(input_api.re.match(pattern, filepath)
               for pattern in whitelist):
      return False
    return not any(input_api.re.match(pattern, filepath)
                   for pattern in blacklist)

  found = 0
  tests = []
  for filepath in input_api.change.AllFiles(directory):
    found += 1
    if wanted(filepath):
      tests.append(filepath)
  input_api.logging.debug('Found %d files, running %d' % (found, len(tests)))
  if not tests:
    return [
        output_api.PresubmitPromptWarning(
          'Out of %d files, found none that matched w=%r, b=%r in directory %s'
          % (found, whitelist, blacklist, directory))
    ]
  return GetUnitTests(input_api, output_api, tests)
def GetPythonUnitTests(input_api, output_api, unit_tests):
  """Run the unit tests out of process, capture the output and use the result
  code to determine success.

  |unit_tests| entries are python module names ('pkg.test_foo'); dotted names
  are converted to a cwd + bare module so 'python -m' works from the right
  directory. Returns a list of input_api.Command objects.
  DEPRECATED.
  """
  # We don't want to hinder users from uploading incomplete patches.
  if input_api.is_committing:
    message_type = output_api.PresubmitError
  else:
    message_type = output_api.PresubmitNotifyResult
  results = []
  for unit_test in unit_tests:
    # Run the unit tests out of process. This is because some unit tests
    # stub out base libraries and don't clean up their mess. It's too easy to
    # get subtle bugs.
    cwd = None
    env = None
    # Keep the original dotted name for the Command's display name.
    unit_test_name = unit_test
    # 'python -m test.unit_test' doesn't work. We need to change to the right
    # directory instead.
    if '.' in unit_test:
      # Tests imported in submodules (subdirectories) assume that the current
      # directory is in the PYTHONPATH. Manually fix that.
      unit_test = unit_test.replace('.', '/')
      cwd = input_api.os_path.dirname(unit_test)
      unit_test = input_api.os_path.basename(unit_test)
      env = input_api.environ.copy()
      # At least on Windows, it seems '.' must explicitly be in PYTHONPATH
      # backpath climbs from |cwd| back up to the presubmit directory; one
      # '..' per path component (counted via '/' since the name was just
      # rewritten with '/').
      backpath = [
          '.', input_api.os_path.pathsep.join(['..'] * (cwd.count('/') + 1))
      ]
      if env.get('PYTHONPATH'):
        # Preserve whatever the caller already had on PYTHONPATH.
        backpath.append(env.get('PYTHONPATH'))
      env['PYTHONPATH'] = input_api.os_path.pathsep.join((backpath))
    cmd = [input_api.python_executable, '-m', '%s' % unit_test]
    results.append(input_api.Command(
        name=unit_test_name,
        cmd=cmd,
        kwargs={'env': env, 'cwd': cwd},
        message=message_type))
  return results
def RunUnitTestsInDirectory(input_api, *args, **kwargs):
  """Runs the tests found in a directory, serially.

  For better performance, use GetUnitTestsInDirectory and pass the result to
  input_api.RunTests yourself.
  """
  commands = GetUnitTestsInDirectory(input_api, *args, **kwargs)
  return input_api.RunTests(commands, False)
def RunUnitTests(input_api, *args, **kwargs):
  """Runs the given tests serially.

  For better performance, use GetUnitTests and pass the result to
  input_api.RunTests yourself.
  """
  commands = GetUnitTests(input_api, *args, **kwargs)
  return input_api.RunTests(commands, False)
def RunPythonUnitTests(input_api, *args, **kwargs):
  """Runs python tests out of process, serially.

  DEPRECATED — kept for legacy PRESUBMIT files.
  """
  commands = GetPythonUnitTests(input_api, *args, **kwargs)
  return input_api.RunTests(commands, False)
def _FetchAllFiles(input_api, white_list, black_list):
"""Hack to fetch all files."""
# We cannot use AffectedFiles here because we want to test every python
# file on each single python change. It's because a change in a python file
# can break another unmodified file.
# Use code similar to InputApi.FilterSourceFile()
def Find(filepath, filters):
for item in filters:
if input_api.re.match(item, filepath):
return True
return False
files = []
path_len = len(input_api.PresubmitLocalPath())
for dirpath, dirnames, filenames in input_api.os_walk(
input_api.PresubmitLocalPath()):
# Passes dirnames in black list to speed up search.
for item in dirnames[:]:
filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
if Find(filepath, black_list):
dirnames.remove(item)
for item in filenames:
filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
if Find(filepath, white_list) and not Find(filepath, black_list):
files.append(filepath)
return files
def GetPylint(input_api, output_api, white_list=None, black_list=None,
              disabled_warnings=None, extra_paths_list=None):
  """Run pylint on python files.

  The default white_list enforces looking only at *.py files. Returns a
  single Command that feeds all matching files to pylint at once (via
  stdin, to sidestep Windows command-line length limits).
  """
  white_list = tuple(white_list or ('.*\.py$',))
  black_list = tuple(black_list or input_api.DEFAULT_BLACK_LIST)
  extra_paths_list = extra_paths_list or []
  if input_api.is_committing:
    error_type = output_api.PresubmitError
  else:
    error_type = output_api.PresubmitPromptWarning
  # Only trigger if there is at least one python file affected.
  def rel_path(regex):
    """Modifies a regex for a subject to accept paths relative to root."""
    def samefile(a, b):
      # Default implementation for platforms lacking os.path.samefile
      # (like Windows).
      return input_api.os_path.abspath(a) == input_api.os_path.abspath(b)
    samefile = getattr(input_api.os_path, 'samefile', samefile)
    if samefile(input_api.PresubmitLocalPath(),
                input_api.change.RepositoryRoot()):
      # Presubmit runs at the repo root: patterns already match as-is.
      return regex
    # Prefix the pattern with the presubmit dir relative to the repo root,
    # escaped so path separators are taken literally.
    prefix = input_api.os_path.join(input_api.os_path.relpath(
        input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot()), '')
    return input_api.re.escape(prefix) + regex
  src_filter = lambda x: input_api.FilterSourceFile(
      x, map(rel_path, white_list), map(rel_path, black_list))
  if not input_api.AffectedSourceFiles(src_filter):
    input_api.logging.info('Skipping pylint: no matching changes.')
    return []
  # _HERE is the directory of this module; the bundled pylintrc lives there.
  extra_args = ['--rcfile=%s' % input_api.os_path.join(_HERE, 'pylintrc')]
  if disabled_warnings:
    extra_args.extend(['-d', ','.join(disabled_warnings)])
  # Lint every matching file in the checkout, not only the affected ones —
  # a change in one file can break another, unmodified file.
  files = _FetchAllFiles(input_api, white_list, black_list)
  if not files:
    return []
  files.sort()
  input_api.logging.info('Running pylint on %d files', len(files))
  input_api.logging.debug('Running pylint on: %s', files)
  # Copy the system path to the environment so pylint can find the right
  # imports.
  env = input_api.environ.copy()
  import sys
  env['PYTHONPATH'] = input_api.os_path.pathsep.join(
      extra_paths_list + sys.path).encode('utf8')
  def GetPylintCmd(files):
    # Windows needs help running python files so we explicitly specify
    # the interpreter to use. It also has limitations on the size of
    # the command-line, so we pass arguments via a pipe.
    if len(files) == 1:
      description = files[0]
    else:
      description = '%s files' % len(files)
    return input_api.Command(
        name='Pylint (%s)' % description,
        cmd=[input_api.python_executable,
             input_api.os_path.join(_HERE, 'third_party', 'pylint.py'),
             '--args-on-stdin'],
        kwargs={'env': env, 'stdin': '\n'.join(files + extra_args)},
        message=error_type)
  # Always run pylint and pass it all the py files at once.
  # Passing py files one at time is slower and can produce
  # different results. input_api.verbose used to be used
  # to enable this behaviour but differing behaviour in
  # verbose mode is not desirable.
  # Leave this unreachable code in here so users can make
  # a quick local edit to diagnose pylint issues more
  # easily.
  if True:
    return [GetPylintCmd(files)]
  else:
    return map(lambda x: GetPylintCmd([x]), files)
def RunPylint(input_api, *args, **kwargs):
  """Legacy presubmit entry point: runs pylint serially.

  For better performance, use GetPylint and pass the result to
  input_api.RunTests yourself.
  """
  commands = GetPylint(input_api, *args, **kwargs)
  return input_api.RunTests(commands, False)
# TODO(dpranke): Get the host_url from the input_api instead
def CheckRietveldTryJobExecution(dummy_input_api, dummy_output_api,
                                 dummy_host_url, dummy_platforms,
                                 dummy_owner):
  """No-op check kept only for API compatibility with existing PRESUBMITs.

  Temporarily 'fixed' (always passes) while the Rietveld API is being
  upgraded to something sensible.
  """
  return []
def CheckBuildbotPendingBuilds(input_api, output_api, url, max_pendings,
                               ignored):
  """Warns when builders at |url| have too many queued builds.

  Builders named in |ignored| and offline builders are skipped. Network or
  JSON failures only produce a notification, never a hard error.
  """
  try:
    connection = input_api.urllib2.urlopen(url)
    raw_data = connection.read()
    connection.close()
  except IOError:
    return [output_api.PresubmitNotifyResult('%s is not accessible' % url)]
  try:
    data = input_api.json.loads(raw_data)
  except ValueError:
    return [output_api.PresubmitNotifyResult('Received malformed json while '
                                             'looking up buildbot status')]
  busy = []
  for (builder_name, builder) in data.iteritems():
    if builder_name in ignored or builder.get('state', '') == 'offline':
      continue
    pending = len(builder.get('pending_builds', []))
    if pending > max_pendings:
      busy.append('%s has %d build(s) pending' % (builder_name, pending))
  if not busy:
    return []
  return [output_api.PresubmitPromptWarning(
      'Build(s) pending. It is suggested to wait that no more than %d '
      'builds are pending.' % max_pendings,
      long_text='\n'.join(busy))]
def CheckOwners(input_api, output_api, source_file_filter=None,
                author_counts_as_owner=True):
  """Checks the change has the required OWNERS coverage.

  At commit time a missing approval is an error (unless --tbr was given);
  at upload time missing OWNER reviewers only produce a notification with
  suggested owners.
  """
  if input_api.is_committing:
    if input_api.tbr:
      # --tbr bypasses the owners check entirely.
      return [output_api.PresubmitNotifyResult(
          '--tbr was specified, skipping OWNERS check')]
    if not input_api.change.issue:
      # Without an issue there is no way to look up approvals.
      return [output_api.PresubmitError("OWNERS check failed: this change has "
          "no Rietveld issue number, so we can't check it for approvals.")]
    needed = 'LGTM from an OWNER'
    output = output_api.PresubmitError
  else:
    needed = 'OWNER reviewers'
    output = output_api.PresubmitNotifyResult
  affected_files = set([f.LocalPath() for f in
      input_api.change.AffectedFiles(file_filter=source_file_filter)])
  owners_db = input_api.owners_db
  # When committing, only reviewers who actually approved count.
  owner_email, reviewers = _RietveldOwnerAndReviewers(
      input_api,
      owners_db.email_regexp,
      approval_needed=input_api.is_committing)
  owner_email = owner_email or input_api.change.author_email
  if author_counts_as_owner and owner_email:
    # The author may vouch for files they themselves own.
    reviewers_plus_owner = set([owner_email]).union(reviewers)
    missing_files = owners_db.files_not_covered_by(affected_files,
        reviewers_plus_owner)
  else:
    missing_files = owners_db.files_not_covered_by(affected_files, reviewers)
  if missing_files:
    output_list = [
        output('Missing %s for these files:\n %s' %
               (needed, '\n '.join(sorted(missing_files))))]
    if not input_api.is_committing:
      # At upload time, help the author pick reviewers.
      suggested_owners = owners_db.reviewers_for(missing_files, owner_email)
      output_list.append(output('Suggested OWNERS: ' +
          '(Use "git-cl owners" to interactively select owners.)\n %s' %
          ('\n '.join(suggested_owners or []))))
    return output_list
  if input_api.is_committing and not reviewers:
    return [output('Missing LGTM from someone other than %s' % owner_email)]
  return []
def _GetRietveldIssueProps(input_api, messages):
"""Gets the issue properties from rietveld."""
issue = input_api.change.issue
if issue and input_api.rietveld:
return input_api.rietveld.get_issue_properties(
issue=int(issue), messages=messages)
def _ReviewersFromChange(change):
"""Return the reviewers specified in the |change|, if any."""
reviewers = set()
if change.R:
reviewers.update(set([r.strip() for r in change.R.split(',')]))
if change.TBR:
reviewers.update(set([r.strip() for r in change.TBR.split(',')]))
# Drop reviewers that aren't specified in email address format.
return set(reviewer for reviewer in reviewers if '@' in reviewer)
def _RietveldOwnerAndReviewers(input_api, email_regexp, approval_needed=False):
  """Returns (owner_email, reviewers) for the change, if any.

  With approval_needed=True only reviewers whose messages carry an approval
  are counted. Without rietveld issue properties the owner is None and the
  reviewers fall back to the R=/TBR= lines of the description (or the empty
  set when approvals were required).
  """
  issue_props = _GetRietveldIssueProps(input_api, True)
  if not issue_props:
    if approval_needed:
      return None, set()
    return None, _ReviewersFromChange(input_api.change)
  if not approval_needed:
    return issue_props['owner_email'], set(issue_props['reviewers'])
  owner_email = issue_props['owner_email']

  def is_approving_reviewer(candidate):
    # The owner cannot approve their own change.
    return email_regexp.match(candidate) and candidate != owner_email

  approvers = set(
      message['sender']
      for message in issue_props.get('messages', [])
      if message.get('approval') and is_approving_reviewer(message['sender']))
  return owner_email, approvers
def _CheckConstNSObject(input_api, output_api, source_file_filter):
"""Checks to make sure no objective-c files have |const NSSomeClass*|."""
pattern = input_api.re.compile(
r'const\s+NS(?!(Point|Range|Rect|Size)\s*\*)\w*\s*\*')
def objective_c_filter(f):
return (source_file_filter(f) and
input_api.os_path.splitext(f.LocalPath())[1] in ('.h', '.m', '.mm'))
files = []
for f in input_api.AffectedSourceFiles(objective_c_filter):
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if files:
if input_api.is_committing:
res_type = output_api.PresubmitPromptWarning
else:
res_type = output_api.PresubmitNotifyResult
return [ res_type('|const NSClass*| is wrong, see ' +
'http://dev.chromium.org/developers/clang-mac',
files) ]
return []
def CheckSingletonInHeaders(input_api, output_api, source_file_filter=None):
  """Flags header files that mention |Singleton<|.

  Singleton<T> must be instantiated from a single translation unit, so its
  use belongs in source files, not headers. C++ '//' comment lines and the
  'class Singleton' declaration itself are ignored.
  """
  pattern = input_api.re.compile(r'(?<!class\s)Singleton\s*<')
  header_extensions = ('.h', '.hxx', '.hpp', '.inl')
  offenders = []
  for f in input_api.AffectedSourceFiles(source_file_filter):
    if not f.LocalPath().endswith(header_extensions):
      continue
    for line in input_api.ReadFile(f).splitlines(False):
      if input_api.re.match(r'//', line):  # Strip C++ comment.
        continue
      if pattern.search(line):
        offenders.append(f)
        break
  if not offenders:
    return []
  return [output_api.PresubmitError(
      'Found Singleton<T> in the following header files.\n' +
      'Please move them to an appropriate source file so that the ' +
      'template gets instantiated in a single compilation unit.',
      offenders)]
def PanProjectChecks(input_api, output_api,
                     excluded_paths=None, text_files=None,
                     license_header=None, project_name=None,
                     owners_check=True, maxlen=80):
  """Checks that ALL chromium orbit projects should use.

  These are checks to be run on all Chromium orbit project, including:
    Chromium
    Native Client
    V8
  When you update this function, please take this broad scope into account.

  Args:
    input_api: Bag of input related interfaces.
    output_api: Bag of output related interfaces.
    excluded_paths: Don't include these paths in common checks.
    text_files: Which file are to be treated as documentation text files.
    license_header: What license header should be on files.
    project_name: What is the name of the project as it appears in the
        license.
    owners_check: Whether to run the OWNERS check at all.
    maxlen: Maximum allowed line length.
  Returns:
    A list of warning or error objects.
  """
  excluded_paths = tuple(excluded_paths or [])
  text_files = tuple(text_files or (
      r'.+\.txt$',
      r'.+\.json$',
  ))
  project_name = project_name or 'Chromium'
  # Accept any year number from 2006 to the current year, or the special
  # 2006-20xx string used on the oldest files. 2006-20xx is deprecated, but
  # tolerated on old files.
  current_year = int(input_api.time.strftime('%Y'))
  allowed_years = (str(s) for s in reversed(xrange(2006, current_year + 1)))
  years_re = '(' + '|'.join(allowed_years) + '|2006-2008|2006-2009|2006-2010)'
  # The (c) is deprecated, but tolerate it until it's removed from all files.
  license_header = license_header or (
      r'.*? Copyright (\(c\) )?%(year)s The %(project)s Authors\. '
        r'All rights reserved\.\n'
      r'.*? Use of this source code is governed by a BSD-style license that '
        r'can be\n'
      r'.*? found in the LICENSE file\.(?: \*/)?\n'
  ) % {
      'year': years_re,
      'project': project_name,
  }
  results = []
  # This code loads the default black list (e.g. third_party, experimental,
  # etc) and add our black list (breakpad, skia and v8 are still not following
  # google style and are not really living this repository).
  # See presubmit_support.py InputApi.FilterSourceFile for the (simple) usage.
  black_list = input_api.DEFAULT_BLACK_LIST + excluded_paths
  white_list = input_api.DEFAULT_WHITE_LIST + text_files
  sources = lambda x: input_api.FilterSourceFile(x, black_list=black_list)
  # NOTE: rebinds the |text_files| tuple above into a filter callable.
  text_files = lambda x: input_api.FilterSourceFile(
      x, black_list=black_list, white_list=white_list)
  # Holds (timestamp, label) of the previous snapshot() call.
  snapshot_memory = []
  def snapshot(msg):
    """Measures & prints performance warning if a rule is running slow."""
    dt2 = input_api.time.clock()
    if snapshot_memory:
      delta_ms = int(1000*(dt2 - snapshot_memory[0]))
      if delta_ms > 500:
        print " %s took a long time: %dms" % (snapshot_memory[1], delta_ms)
    snapshot_memory[:] = (dt2, msg)
  if owners_check:
    snapshot("checking owners")
    results.extend(input_api.canned_checks.CheckOwners(
        input_api, output_api, source_file_filter=None))
  snapshot("checking long lines")
  results.extend(input_api.canned_checks.CheckLongLines(
      input_api, output_api, maxlen, source_file_filter=sources))
  snapshot( "checking tabs")
  results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
      input_api, output_api, source_file_filter=sources))
  snapshot( "checking stray whitespace")
  results.extend(input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
      input_api, output_api, source_file_filter=sources))
  snapshot("checking nsobjects")
  results.extend(_CheckConstNSObject(
      input_api, output_api, source_file_filter=sources))
  snapshot("checking singletons")
  results.extend(CheckSingletonInHeaders(
      input_api, output_api, source_file_filter=sources))
  # The following checks are only done on commit, since the commit bot will
  # auto-fix most of these.
  if input_api.is_committing:
    snapshot("checking eol style")
    results.extend(input_api.canned_checks.CheckChangeSvnEolStyle(
        input_api, output_api, source_file_filter=text_files))
    snapshot("checking svn mime types")
    results.extend(input_api.canned_checks.CheckSvnForCommonMimeTypes(
        input_api, output_api))
    snapshot("checking license")
    results.extend(input_api.canned_checks.CheckLicense(
        input_api, output_api, license_header, source_file_filter=sources))
    snapshot("checking was uploaded")
    results.extend(input_api.canned_checks.CheckChangeWasUploaded(
        input_api, output_api))
    snapshot("checking description")
    results.extend(input_api.canned_checks.CheckChangeHasDescription(
        input_api, output_api))
    results.extend(input_api.canned_checks.CheckDoNotSubmitInDescription(
        input_api, output_api))
    snapshot("checking do not submit in files")
    results.extend(input_api.canned_checks.CheckDoNotSubmitInFiles(
        input_api, output_api))
  snapshot("done")
  return results
def CheckPatchFormatted(input_api, output_api):
  """Warns when 'git cl format' would modify files in this directory."""
  import git_cl
  cmd = ['cl', 'format', '--dry-run', input_api.PresubmitLocalPath()]
  code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=True)
  if code != 2:
    # As this is just a warning, ignore all other errors if the user
    # happens to have a broken clang-format, doesn't use git, etc etc.
    return []
  directory = input_api.basename(input_api.PresubmitLocalPath())
  return [output_api.PresubmitPromptWarning(
      'The %s directory requires clang-formatting. '
      'Please run git cl format %s' % (directory, directory))]
| {
"content_hash": "b317a1a5d057b6b038a845f879a4000c",
"timestamp": "",
"source": "github",
"line_count": 1111,
"max_line_length": 80,
"avg_line_length": 37.0963096309631,
"alnum_prop": 0.6654534866792837,
"repo_name": "michalliu/chromium-depot_tools",
"id": "05b045de61fc5b22668322d61265db3d0c04020a",
"size": "41381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presubmit_canned_checks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5028"
},
{
"name": "CSS",
"bytes": "136"
},
{
"name": "PHP",
"bytes": "586"
},
{
"name": "Python",
"bytes": "1900447"
},
{
"name": "Shell",
"bytes": "101127"
}
],
"symlink_target": ""
} |
"""Tests for Vanderbilt SPC binary sensor platform."""
import asyncio
import pytest
from homeassistant.components.spc import SpcRegistry
from homeassistant.components.binary_sensor import spc
from tests.common import async_test_home_assistant
@pytest.fixture
def hass(loop):
    """Home Assistant fixture with device mapping registry."""
    # Start a test Home Assistant instance on the event loop supplied by
    # the |loop| fixture.
    hass = loop.run_until_complete(async_test_home_assistant(loop))
    # The SPC platform resolves devices through this registry.
    hass.data['spc_registry'] = SpcRegistry()
    yield hass
    # Teardown after the test body: stop the instance cleanly.
    loop.run_until_complete(hass.async_stop())
@asyncio.coroutine
def test_setup_platform(hass):
    """Test autodiscovery of supported device types."""
    added_entities = []
    # Discovery payload: three zones whose 'type' codes should map to the
    # smoke / motion / opening device classes respectively.
    zones = {'devices': [{
        'id': '1',
        'type': '3',
        'zone_name': 'Kitchen smoke',
        'area': '1',
        'area_name': 'House',
        'input': '0',
        'status': '0',
    }, {
        'id': '3',
        'type': '0',
        'zone_name': 'Hallway PIR',
        'area': '1',
        'area_name': 'House',
        'input': '0',
        'status': '0',
    }, {
        'id': '5',
        'type': '1',
        'zone_name': 'Front door',
        'area': '1',
        'area_name': 'House',
        'input': '1',
        'status': '0',
    }]}

    def add_entities(entities):
        # Captures what the platform registers instead of adding it to hass.
        nonlocal added_entities
        added_entities = list(entities)

    yield from spc.async_setup_platform(hass=hass,
                                        config={},
                                        async_add_devices=add_entities,
                                        discovery_info=zones)
    assert len(added_entities) == 3
    # 'input': '1' on the third zone means the sensor starts in the 'on'
    # state; the others start 'off'.
    assert added_entities[0].device_class == 'smoke'
    assert added_entities[0].state == 'off'
    assert added_entities[1].device_class == 'motion'
    assert added_entities[1].state == 'off'
    assert added_entities[2].device_class == 'opening'
    assert added_entities[2].state == 'on'
    assert all(d.hidden for d in added_entities)
| {
"content_hash": "afb654d585e8da8eb85ede7333c25f7a",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 71,
"avg_line_length": 29.16417910447761,
"alnum_prop": 0.5522006141248721,
"repo_name": "stefan-jonasson/home-assistant",
"id": "5004ccd321049a88dd66f9b173aee3f5c462727b",
"size": "1954",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/binary_sensor/test_spc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4056"
},
{
"name": "Python",
"bytes": "8360711"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12658"
}
],
"symlink_target": ""
} |
# Recipe modules this recipe depends on.
DEPS = [
  'checkout',
  'docker',
  'env',
  'infra',
  'recipe_engine/file',
  'recipe_engine/path',
  'recipe_engine/properties',
  'recipe_engine/python',
  'recipe_engine/step',
  'run',
  'vars',
]

# Pinned Chrome-in-Docker image used to run the karma tests.
DOCKER_IMAGE = 'gcr.io/skia-public/gold-karma-chrome-tests:77.0.3865.120_v2'
# Script (relative to the checkout root) executed inside the container.
INNER_KARMA_SCRIPT = 'skia/infra/canvaskit/test_canvaskit.sh'
def RunSteps(api):
  """Runs the CanvasKit karma tests inside a pinned Chrome Docker image."""
  api.vars.setup()
  checkout_root = api.path['start_dir']
  out_dir = api.vars.swarming_out_dir

  # The karma script is configured to look in ./canvaskit/bin/ for
  # the test files to load, so we must copy them there (see Set up for
  # docker).
  copy_dest = checkout_root.join('skia', 'modules', 'canvaskit',
                                 'canvaskit', 'bin')
  api.file.ensure_directory('mkdirs copy_dest', copy_dest, mode=0777)
  base_dir = api.vars.build_dir
  copies = {
    base_dir.join('canvaskit.js'): copy_dest.join('canvaskit.js'),
    base_dir.join('canvaskit.wasm'): copy_dest.join('canvaskit.wasm'),
  }
  # The container needs read access to the whole skia checkout.
  recursive_read = [checkout_root.join('skia')]

  # Metadata forwarded to the Gold ingestion inside the test script.
  args = [
    '--builder', api.vars.builder_name,
    '--git_hash', api.properties['revision'],
    '--buildbucket_build_id', api.properties.get('buildbucket_build_id', ''),
    '--bot_id', api.vars.swarming_bot_id,
    '--task_id', api.vars.swarming_task_id,
    '--browser', 'Chrome',
    '--config', api.vars.configuration,
    '--source_type', 'canvaskit',
  ]
  if api.vars.is_trybot:
    args.extend([
      '--issue', api.vars.issue,
      '--patchset', api.vars.patchset,
    ])

  api.docker.run(
      name='Test CanvasKit with Docker',
      docker_image=DOCKER_IMAGE,
      src_dir=checkout_root,
      out_dir=out_dir,
      script=checkout_root.join(INNER_KARMA_SCRIPT),
      args=args,
      docker_args=None,
      copies=copies,
      recursive_read=recursive_read,
      # Browser tests are flaky; retry up to three times.
      attempts=3,
  )
def GenTests(api):
  """Generates two simulation cases: a CI run and a gerrit trybot run."""
  ci_builder = ('Test-Debian9-EMCC-GCE-GPU-WEBGL1'
                '-wasm-Debug-All-CanvasKit')
  yield (
      api.test('Test-Debian9-EMCC-GCE-GPU-WEBGL1-wasm-Debug-All-CanvasKit') +
      api.properties(buildername=ci_builder,
                     repository='https://skia.googlesource.com/skia.git',
                     revision='abc123',
                     path_config='kitchen',
                     swarm_out_dir='[SWARM_OUT_DIR]')
  )

  trybot_builder = ('Test-Debian9-EMCC-GCE-CPU-AVX2'
                    '-wasm-Debug-All-CanvasKit')
  yield (
      api.test('canvaskit_trybot') +
      api.properties(buildername=trybot_builder,
                     repository='https://skia.googlesource.com/skia.git',
                     revision='abc123',
                     path_config='kitchen',
                     swarm_out_dir='[SWARM_OUT_DIR]',
                     patch_ref='89/456789/12',
                     patch_repo='https://skia.googlesource.com/skia.git',
                     patch_storage='gerrit',
                     patch_set=7,
                     patch_issue=1234,
                     gerrit_project='skia',
                     gerrit_url='https://skia-review.googlesource.com/')
  )
| {
"content_hash": "2a346f42dd383bc7c4d26d9bac14790e",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 33.924731182795696,
"alnum_prop": 0.5534072900158479,
"repo_name": "HalCanary/skia-hc",
"id": "956e8967e05c82fff53d1c6a727beafed216554c",
"size": "3372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infra/bots/recipes/test_canvaskit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1277297"
},
{
"name": "Batchfile",
"bytes": "865"
},
{
"name": "C",
"bytes": "505166"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "32234337"
},
{
"name": "CMake",
"bytes": "2850"
},
{
"name": "CSS",
"bytes": "3078"
},
{
"name": "Dockerfile",
"bytes": "14764"
},
{
"name": "GLSL",
"bytes": "109164"
},
{
"name": "Go",
"bytes": "135327"
},
{
"name": "HTML",
"bytes": "1321397"
},
{
"name": "Java",
"bytes": "167849"
},
{
"name": "JavaScript",
"bytes": "463920"
},
{
"name": "Lex",
"bytes": "2521"
},
{
"name": "Lua",
"bytes": "70982"
},
{
"name": "Makefile",
"bytes": "13502"
},
{
"name": "Objective-C",
"bytes": "83351"
},
{
"name": "Objective-C++",
"bytes": "366996"
},
{
"name": "PHP",
"bytes": "139510"
},
{
"name": "PowerShell",
"bytes": "1432"
},
{
"name": "Python",
"bytes": "1055437"
},
{
"name": "Shell",
"bytes": "95010"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import types
from django import forms
from patt3rns.forms import widgets as patt3rns_widgets
class CharField(forms.CharField):
    """CharField that strips surrounding whitespace from its value."""

    def clean(self, value):
        # Strip before validation so required/min_length see the trimmed
        # value.
        if isinstance(value, types.StringTypes):
            value = value.strip()
        cleaned = super(CharField, self).clean(value)
        # Yes we do it again after the base class runs: it may have coerced
        # or replaced the value.
        if isinstance(cleaned, types.StringTypes):
            cleaned = cleaned.strip()
        return cleaned
class DateField(forms.DateField):
    """DateField rendered with the project's date picker widget."""
    widget = patt3rns_widgets.DateInput
class DateTimeField(forms.DateTimeField):
    """DateTimeField rendered with the project's date-time widget."""
    widget = patt3rns_widgets.DateTimeInput
class TimeField(forms.TimeField):
    """TimeField rendered with the project's time picker widget."""
    widget = patt3rns_widgets.TimeInput
| {
"content_hash": "9d8f9774166c24cf567fed84f0ec1962",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 57,
"avg_line_length": 23.806451612903224,
"alnum_prop": 0.7018970189701897,
"repo_name": "fredpalmer/patt3rns",
"id": "d0581bd5a7c65d19eae976ee09446f157f739403",
"size": "753",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "patt3rns/forms/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2150"
},
{
"name": "HTML",
"bytes": "70485"
},
{
"name": "JavaScript",
"bytes": "26074"
},
{
"name": "Python",
"bytes": "72137"
},
{
"name": "Shell",
"bytes": "1008"
}
],
"symlink_target": ""
} |
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
    """Dictionary work-alike that delegates to a real dict in self.data.

    Python 2 era class: subclass this (instead of the builtin dict) to
    customize mapping behavior by overriding individual methods.
    """
    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            if not hasattr(dict,'keys'):
                dict = type({})(dict) # make mapping from a sequence
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    # Python 2 three-way comparison; compares the underlying dicts.
    def __cmp__(self, dict):
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    def __len__(self): return len(self.data)
    def __getitem__(self, key): return self.data[key]
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            # Fast path for the base class: share no state, copy the dict.
            return UserDict(self.data)
        import copy
        data = self.data
        try:
            # Temporarily detach the (possibly large) payload so that
            # copy.copy only duplicates the lightweight instance itself.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return self.data.has_key(key)
    def update(self, dict):
        # Make progressively weaker assumptions about the argument:
        # UserDict, then plain dict, then any mapping with items().
        if isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type(self.data)):
            self.data.update(dict)
        else:
            for k, v in dict.items():
                self[k] = v
    def get(self, key, failobj=None):
        if not self.has_key(key):
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if not self.has_key(key):
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    # Alternate constructor mirroring dict.fromkeys.
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    fromkeys = classmethod(fromkeys)
class IterableUserDict(UserDict):
    """UserDict variant whose instances iterate directly over their keys."""
    def __iter__(self):
        # Delegate straight to the underlying mapping's iterator.
        return self.data.__iter__()
class DictMixin:
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().
    # NOTE: this is Python 2 code (`raise E, msg`, iteritems/.next(),
    # has_key); it is not valid Python 3 syntax.
    # second level definitions support higher levels
    def __iter__(self):
        # Default key iteration built on the required keys() primitive.
        for k in self.keys():
            yield k
    def has_key(self, key):
        # EAFP membership test on the required __getitem__ primitive.
        try:
            value = self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)
    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()
    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        # Delete through __delitem__ so subclass hooks run.
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # At most one extra positional arg: the default returned on a miss.
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                              + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        # Take an arbitrary (key, value) pair; empty mapping -> KeyError,
        # mirroring dict.popitem().
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other):
        # Make progressively weaker assumptions about "other"
        if hasattr(other, 'iteritems'):  # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, '__iter__'):  # iter saves memory
            for k in other:
                self[k] = other[k]
        else:
            for k in other.keys():
                self[k] = other[k]
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        # Python 2 three-way compare; None sorts before any mapping.
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        # O(n) if keys() builds a list; subclasses may override.
        return len(self.keys())
| {
"content_hash": "0b52c8d7cdbfb5c9ef03a3c7464a4375",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 77,
"avg_line_length": 33.207317073170735,
"alnum_prop": 0.5552699228791774,
"repo_name": "OS2World/APP-INTERNET-torpak_2",
"id": "35f86fc4dc1076ad0eeeb862e44d290552657297",
"size": "5446",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Lib/UserDict.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
pytime
~~~~~~~~~~~~~
An easy-to-use module for solving datetime needs from strings.
:copyright: (c) 2015 by Sinux <nsinux@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import calendar
import datetime
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
    """Normalize *value* (string/date/datetime) via the shared BaseParser."""
    return bp(value)
def count(value1, value2):
    """Return the timedelta between two parsed time values.

    When one side is a date and the other a datetime, the date side is
    promoted to its midnight datetime before subtracting.
    """
    first, second = parse(value1), parse(value2)
    if type(first) != type(second):
        # Mixed date/datetime: promote plain dates to midnight datetimes.
        if not isinstance(first, datetime.datetime):
            first = midnight(first)
        if not isinstance(second, datetime.datetime):
            second = midnight(second)
    return first - second
# max, min
# Snapshot of "now" taken once at import time; every default argument below
# is frozen at that moment, so a long-running process will see stale values.
# TODO(review): confirm this import-time freezing is acceptable for callers.
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
# Common offsets reused by the week/month helpers below.
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
    """This day of this month, optionally transplanted into *year*."""
    if year:
        return datetime.date(int(year), _date.month, _date.day)
    return _date
def tomorrow(date=None):
    """Tomorrow is another day: the day after *date* (default: today)."""
    base = parse(date) if date else _date
    return base + datetime.timedelta(days=1)
def yesterday(date=None):
    """Yesterday once more: the day before *date* (default: today)."""
    base = parse(date) if date else _date
    return base - datetime.timedelta(days=1)
########################
# function method
########################
def days_range(first=None, second=None, wipe=False):
    """
    get all days between first and second (order-insensitive)
    :param first: datetime, date or string
    :param second: datetime, date or string
    :param wipe: boolean, excludes first and last date from range when True.
                 Default is False.
    :return: list of days, newest first
    """
    a, b = parse(first), parse(second)
    start, end = (b, a) if a > b else (a, b)
    span = (end - start).days
    dates = [end - datetime.timedelta(days=offset) for offset in range(span + 1)]
    if wipe and len(dates) >= 2:
        # Drop both endpoints.
        dates = dates[1:-1]
    return dates
def last_day(year=_year, month=_month):
    """
    get the given month's last day
    :param year: default to current year
    :param month: default to current month
    :return: month's last day as datetime.date
    """
    _, day_count = calendar.monthrange(year, month)
    return datetime.date(year=year, month=month, day=day_count)
def midnight(arg=None):
    """
    convert date to datetime as midnight or get current day's midnight
    :param arg: string or date/datetime
    :return: datetime at 00:00:00
    """
    if arg:
        _arg = parse(arg)
        # FIX: datetime must be tested before date — datetime is a subclass
        # of date, so the original date-first ordering made the datetime
        # branch unreachable (it produced the same result only because
        # datetime.combine accepts a datetime as its date argument).
        if isinstance(_arg, datetime.datetime):
            return datetime.datetime.combine(_arg.date(),
                                             datetime.datetime.min.time())
        elif isinstance(_arg, datetime.date):
            return datetime.datetime.combine(_arg,
                                             datetime.datetime.min.time())
        # Unrecognized parse result falls through to None, as before.
    else:
        return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
    """
    count datetime before `base` time
    :param base: minuend -> str/datetime/date
    :param diff: str (e.g. '2years 3months'), parsed by BaseParser.parse_diff
    :return: datetime
    """
    _base = parse(base)
    # FIX: only promote pure dates. datetime subclasses date, so the original
    # `isinstance(_base, datetime.date)` also matched datetimes and silently
    # floored them to midnight — inconsistent with count()'s promotion rule.
    if isinstance(_base, datetime.date) and not isinstance(_base, datetime.datetime):
        _base = midnight(_base)
    if not diff:
        return _base
    result_dict = dp(diff)
    # weeks already convert to days in diff_parse function(dp)
    for unit in result_dict:
        _val = result_dict[unit]
        if not _val:
            continue
        if unit == 'years':
            _base = _base.replace(year=(_base.year - _val))
        elif unit == 'months':
            # FIX: absolute month arithmetic handles diffs of any size; the
            # original only coped with crossing a single year boundary and
            # produced invalid months for diffs larger than 12 months.
            total_months = _base.year * 12 + (_base.month - 1) - _val
            _base = _base.replace(year=total_months // 12,
                                  month=total_months % 12 + 1)
        elif unit in ['days', 'hours', 'minutes', 'seconds']:
            _base = _base - datetime.timedelta(**{unit: _val})
    return _base
def after(base=_datetime, diff=None):
    """
    count datetime after diff args
    :param base: str/datetime/date
    :param diff: str (e.g. '39days'), parsed by BaseParser.parse_diff
    :return: datetime
    """
    _base = parse(base)
    # FIX: only promote pure dates (datetime subclasses date; see before()).
    if isinstance(_base, datetime.date) and not isinstance(_base, datetime.datetime):
        _base = midnight(_base)
    result_dict = dp(diff)
    for unit in result_dict:
        _val = result_dict[unit]
        if not _val:
            continue
        if unit == 'years':
            _base = _base.replace(year=(_base.year + _val))
        elif unit == 'months':
            # FIX: absolute month arithmetic — the original broke for diffs
            # larger than 12 months (invalid month values).
            total_months = _base.year * 12 + (_base.month - 1) + _val
            _base = _base.replace(year=total_months // 12,
                                  month=total_months % 12 + 1)
        elif unit in ['days', 'hours', 'minutes', 'seconds']:
            _base = _base + datetime.timedelta(**{unit: _val})
    return _base
def _datetime_to_date(arg):
    """
    convert datetime/str to date
    :param arg: str/datetime/date
    :return: datetime.date
    """
    parsed = parse(arg)
    if isinstance(parsed, datetime.datetime):
        return parsed.date()
    return parsed
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
    """Return (Monday, end) for the week containing *arg*.

    ``end`` is the week's Sunday when *clean* is true, otherwise the
    following Monday (an exclusive bound).
    """
    day = _datetime_to_date(arg)
    monday = day - datetime.timedelta(days=day.weekday())
    sunday = day + datetime.timedelta(days=6 - day.weekday())
    if clean:
        return monday, sunday
    return monday, sunday + _ONE_DAY
def last_week(arg=_date, clean=False):
    """Previous week's bounds: this_week shifted back seven days."""
    monday, end = this_week(arg)
    if clean:
        return monday - _SEVEN_DAYS, end - _SEVEN_DAYS
    return monday - _SEVEN_DAYS, end - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
    """Following week's bounds: this_week shifted forward seven days."""
    monday, end = this_week(arg)
    if clean:
        return monday + _SEVEN_DAYS, end + _SEVEN_DAYS
    return monday + _SEVEN_DAYS, end + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
    """(first day, end) of the month containing *arg*; end is the last day
    when *clean* is true, else the first day of the next month."""
    day = _datetime_to_date(arg)
    first = datetime.date(day.year, day.month, 1)
    final = last_day(day.year, day.month)
    if clean:
        return first, final
    return first, final + _ONE_DAY
def last_month(arg=_date, clean=False):
    """(first day, end) of the month before the one containing *arg*."""
    day = _datetime_to_date(arg)
    current_first = datetime.date(day.year, day.month, 1)
    previous_last = current_first - _ONE_DAY
    previous_first = datetime.date(previous_last.year, previous_last.month, 1)
    if clean:
        return previous_first, previous_last
    return previous_first, current_first
def next_month(arg=_date, clean=False):
    """(first day, end) of the month after the one containing *arg*."""
    day = _datetime_to_date(arg)
    current_last = last_day(day.year, day.month)
    upcoming_first = current_last + _ONE_DAY
    upcoming_last = last_day(upcoming_first.year, upcoming_first.month)
    if clean:
        return upcoming_first, upcoming_last
    return upcoming_first, upcoming_last + _ONE_DAY
######################
# festival
######################
def new_year(year=None):
    """January 1st of *year* (default: the current year)."""
    return datetime.date(int(year) if year else _year, 1, 1)
def valentine(year=None):
    """Valentine's Day: February 14th of *year* (default: current year)."""
    return datetime.date(int(year) if year else _year, 2, 14)
def fool(year=None):
    """April Fools' Day: April 1st of *year* (default: current year)."""
    return datetime.date(int(year) if year else _year, 4, 1)
def christmas(year=None):
    """Christmas Day: December 25th of *year* (default: current year)."""
    return datetime.date(int(year) if year else _year, 12, 25)
def christ_eve(year=None):
    """Christmas Eve: the day before Christmas (December 24th)."""
    xmas = christmas(year)
    return yesterday(xmas)
def mother(year=None):
    """
    the 2nd Sunday in May
    :param year: int
    :return: Mother's day
    """
    first = datetime.date(int(year), 5, 1) if year else datetime.date(_year, 5, 1)
    # May 1 falls `weekday()` days after Monday; the second Sunday is
    # therefore day 14 minus that offset.
    return datetime.date(first.year, 5, 14 - first.weekday())
def father(year=None):
    """
    the 3rd Sunday in June
    :param year: int
    :return: Father's day
    """
    first = datetime.date(int(year), 6, 1) if year else datetime.date(_year, 6, 1)
    # Third Sunday: day 21 minus June 1st's weekday offset.
    return datetime.date(first.year, 6, 21 - first.weekday())
def halloween(year=None):
    """Halloween: October 31st (the month's last day)."""
    return last_day(year, 10) if year else last_day(month=10)
def easter(year=None):
    """
    1900 - 2099 limit (Gaussian-style Easter computation)
    :param year: int
    :return: Easter Sunday as datetime.date
    """
    yr = int(year) if year else _year
    offset = yr - 1900
    golden = offset % 19
    quarters = offset // 4
    correction = (7 * golden + 1) // 19
    epact = (11 * golden + 4 - correction) % 29
    weekday_shift = (offset + quarters + 31 - epact) % 7
    day = 25 - epact - weekday_shift
    # Positive day -> April; zero/negative counts back from March 31st.
    if day > 0:
        return datetime.date(yr, 4, day)
    return datetime.date(yr, 3, 31 + day)
def thanks(year=None):
    """
    the 4th Thursday in November
    :param year: int
    :return: Thanksgiving Day
    """
    first = datetime.date(int(year), 11, 1) if year else datetime.date(_year, 11, 1)
    wd = first.weekday()
    # Thursday is weekday 3: when Nov 1 falls after Thursday the fourth
    # Thursday lands a week later in the month.
    day = (32 - wd) if wd > 3 else (25 - wd)
    return datetime.date(first.year, 11, day)
def vatertag(year=None):
    """
    father's day in Germany: 39 days after Easter Sunday
    """
    easter_sunday = easter(year)
    return after(easter_sunday, '39days').date()
if __name__ == '__main__':
    # Ad-hoc smoke test: parse a sample date string with the shared parser.
    # _time_filter('2015-01-03')
    # print(calendar.monthrange(2015, 10))
    print(bp('2015-01-03'))
| {
"content_hash": "ae77dac8cc0be09219dfa37c13b30dc3",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 108,
"avg_line_length": 28.841463414634145,
"alnum_prop": 0.6039112050739958,
"repo_name": "shinux/PyTime",
"id": "b892833dd0ff5f1509bbe2cc769c0379c228893f",
"size": "9501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytime/pytime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30919"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
"""
eve.auth
~~~~~~~~
Allow API endpoints to be secured via BasicAuth and derivates.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from flask import request, Response, current_app as app, g, abort
from functools import wraps
def requires_auth(endpoint_class):
    """ Enables Authorization logic for decorated functions.

    :param endpoint_class: the 'class' to which the decorated endpoint belongs
                           to. Can be 'resource' (resource endpoint), 'item'
                           (item endpoint) and 'home' for the API entry point.

    .. versionchanged:: 0.0.7
       Passing the 'resource' argument when invoking auth.authenticate()

    .. versionchanged:: 0.0.5
       Support for Cross-Origin Resource Sharing (CORS): 'OPTIONS' request
       method is now public by default. The actual method ('GET', etc.) will
       still be protected if so configured.

    .. versionadded:: 0.0.4
    """
    def fdec(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            if args:
                # resource or item endpoint: first positional arg is the
                # resource name, used to look up the per-resource config.
                resource_name = args[0]
                resource = app.config['DOMAIN'][args[0]]
                if endpoint_class == 'resource':
                    # Read roles apply to safe verbs, write roles to the rest.
                    public = resource['public_methods']
                    roles = list(resource['allowed_roles'])
                    if request.method in ['GET', 'HEAD', 'OPTIONS']:
                        roles += resource['allowed_read_roles']
                    else:
                        roles += resource['allowed_write_roles']
                elif endpoint_class == 'item':
                    public = resource['public_item_methods']
                    roles = list(resource['allowed_item_roles'])
                    if request.method in ['GET', 'HEAD', 'OPTIONS']:
                        roles += resource['allowed_item_read_roles']
                    else:
                        roles += resource['allowed_item_write_roles']
                auth = _auth_object(resource_name)
            else:
                # home endpoint: global (app-level) auth configuration.
                resource_name = resource = None
                public = app.config['PUBLIC_METHODS'] + ['OPTIONS']
                roles = list(app.config['ALLOWED_ROLES'])
                if request.method in ['GET', 'OPTIONS']:
                    roles += app.config['ALLOWED_READ_ROLES']
                else:
                    roles += app.config['ALLOWED_WRITE_ROLES']
                auth = app.auth
            if auth and request.method not in public:
                # Failed authorization triggers the scheme's 401 challenge.
                if not auth.authorized(roles, resource_name, request.method):
                    return auth.authenticate()
            return f(*args, **kwargs)
        return decorated
    return fdec
class BasicAuth(object):
    """ Implements Basic AUTH logic. Subclass and override check_auth()
    with custom credential validation.

    The per-request auth value is stored on flask's ``g`` object for
    thread safety (since 0.4); errors return a parseable body (#366).
    .. versionadded:: 0.0.4
    """
    def set_request_auth_value(self, value):
        # flask.g is request-scoped, so this is safe across threads.
        g.auth_value = value

    def get_request_auth_value(self):
        return g.get("auth_value")

    def check_auth(self, username, password, allowed_roles, resource, method):
        """ Hook: return truthy when the username/password combination may
        run *method* against *resource*. Must be overridden.

        :param username: username provided with current request.
        :param password: password provided with current request
        :param allowed_roles: allowed user roles.
        :param resource: resource being requested.
        :param method: HTTP method being executed (POST, GET, etc.)
        """
        raise NotImplementedError

    def authenticate(self):
        """ Abort the request with a 401 response that advertises Basic
        auth. Override to change the response and/or the realm.
        """
        challenge = {'WWW-Authenticate': 'Basic realm:"%s"' % __package__}
        resp = Response(None, 401, challenge)
        abort(401, description='Please provide proper credentials',
              response=resp)

    def authorized(self, allowed_roles, resource, method):
        """ Validates that the current request is allowed to pass through.

        :param allowed_roles: allowed roles for the current request, can be
                              a string or a list of roles.
        :param resource: resource being requested.
        """
        credentials = request.authorization
        if not credentials:
            return credentials
        return self.check_auth(credentials.username, credentials.password,
                               allowed_roles, resource, method)
class HMACAuth(BasicAuth):
    """ Hash Message Authentication Code (HMAC) authentication logic. Must be
    subclassed to implement custom authorization checking.

    .. versionchanged:: 0.4
       Ensure all errors returns a parseable body #366.
    .. versionchanged:: 0.0.9
       Replaced the now deprecated request.data with request.get_data().
    .. versionchanged:: 0.0.7
       Support for 'resource' argument.
    .. versionadded:: 0.0.5
    """
    def check_auth(self, userid, hmac_hash, headers, data, allowed_roles,
                   resource, method):
        """ This function is called to check if a token is valid. Must be
        overridden with custom logic.

        :param userid: user id included with the request.
        :param hmac_hash: hash included with the request.
        :param headers: request headers. Suitable for hash computing.
        :param data: request data. Suitable for hash computing.
        :param allowed_roles: allowed user roles.
        :param resource: resource being requested.
        :param method: HTTP method being executed (POST, GET, etc.)
        """
        raise NotImplementedError

    def authenticate(self):
        """ Returns a standard 401. Override if you want to change the
        response.
        """
        abort(401, description='Please provide proper credentials')

    def authorized(self, allowed_roles, resource, method):
        """ Validates that the current request is allowed to pass through.

        :param allowed_roles: allowed roles for the current request, can be a
                              string or a list of roles.
        :param resource: resource being requested.
        """
        auth = request.headers.get('Authorization')
        try:
            userid, hmac_hash = auth.split(':')
        except (AttributeError, ValueError):
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. AttributeError covers a missing
            # header (auth is None); ValueError covers a header that does not
            # split into exactly 'userid:hash'.
            auth = None
        return auth and self.check_auth(userid, hmac_hash, request.headers,
                                        request.get_data(), allowed_roles,
                                        resource, method)
class TokenAuth(BasicAuth):
    """ Implements Token AUTH logic (the token travels as the Basic-auth
    username). Subclass and override check_auth().

    .. versionchanged:: 0.4
       Ensure all errors returns a parseable body #366.
    .. versionchanged:: 0.0.7
       Support for 'resource' argument.
    .. versionadded:: 0.0.5
    """
    def check_auth(self, token, allowed_roles, resource, method):
        """ Hook: return truthy when *token* grants access. Must be
        overridden with custom logic.

        :param token: decoded user name.
        :param allowed_roles: allowed user roles
        :param resource: resource being requested.
        :param method: HTTP method being executed (POST, GET, etc.)
        """
        raise NotImplementedError

    def authenticate(self):
        """ Abort with a 401 response that advertises Basic auth. Override
        to change the response and/or the realm.
        """
        challenge = {'WWW-Authenticate': 'Basic realm:"%s"' % __package__}
        resp = Response(None, 401, challenge)
        abort(401, description='Please provide proper credentials',
              response=resp)

    def authorized(self, allowed_roles, resource, method):
        """ Validates that the current request is allowed to pass through.

        :param allowed_roles: allowed roles for the current request, can be a
                              string or a list of roles.
        :param resource: resource being requested.
        """
        credentials = request.authorization
        if not credentials:
            return credentials
        return self.check_auth(credentials.username, allowed_roles, resource,
                               method)
def auth_field_and_value(resource):
    """ If auth is active and the resource requires it, return both the
    current request 'request_auth_value' and the 'auth_field' for the
    resource.

    .. versionchanged:: 0.4
       Use new auth.request_auth_value() method.
    .. versionadded:: 0.3
    """
    # Resource endpoints check 'public_methods'; item endpoints check
    # 'public_item_methods'.
    if '|resource' in request.endpoint:
        public_methods_key = 'public_methods'
    else:
        public_methods_key = 'public_item_methods'
    resource_dict = app.config['DOMAIN'][resource]
    auth = _auth_object(resource)
    request_auth_value = auth.get_request_auth_value() if auth else None
    if request.method in resource_dict[public_methods_key]:
        # Public method: no auth field restriction applies.
        auth_field = None
    else:
        auth_field = resource_dict.get('auth_field', None)
    return auth_field, request_auth_value
def _auth_object(resource):
    """ Ensure resource auth is an instance and its state is preserved
    between calls.

    .. versionadded:: 0.5.2
    """
    resource_def = app.config['DOMAIN'][resource]
    auth = resource_def['authentication']
    if callable(auth):
        # Instantiate lazily and store the instance back so its state
        # persists across requests.
        resource_def['authentication'] = auth()
    return resource_def['authentication']
| {
"content_hash": "4687284461745594269aff24a539b9bf",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 79,
"avg_line_length": 38.428030303030305,
"alnum_prop": 0.601675702316412,
"repo_name": "superdesk/eve",
"id": "0172b0a839098c7e720de067890681c3b739ecfd",
"size": "10170",
"binary": false,
"copies": "1",
"ref": "refs/heads/sync-upstream",
"path": "eve/auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "598974"
}
],
"symlink_target": ""
} |
from keras.models import Sequential
from keras.layers import Dense, Dropout, Convolution1D, MaxPooling1D, Flatten, Embedding
from keras.optimizers import SGD
WEIGHTS_FOLDER_FORMAT = 'data/weights/{}_weights.h5'
class BaseNet:
    """Thin wrapper around a compiled Keras model with a uniform
    train/evaluate/save API.

    Subclasses set NAME and provide a static build_model(train) factory.
    Keyword options (all optional): epoch (default 500), batch_size (32),
    verbose (0).
    """

    NAME = 'NoneNet'

    def __init__(self, train=False, **kwargs):
        self.model = self.build_model(train)
        self.history = None
        # FIX: the original used `500 or kwargs.get('epoch')` (always 500,
        # caller's value ignored) and `0 or kwargs.get('verbose')` (None when
        # the option is absent). Read the option with a real default instead.
        self.epoch = kwargs.get('epoch', 500)
        self.batch_size = kwargs.get('batch_size', 32)
        self.verbose = kwargs.get('verbose', 0)

    def fit(self, *args, **kwargs):
        """Train the model; store and return the Keras History object."""
        self.history = self.model.fit(*args, **kwargs)
        return self.history

    def predict(self, *args, **kwargs):
        return self.model.predict(*args, **kwargs)

    def evaluate(self, *args, **kwargs):
        return self.model.evaluate(*args, **kwargs)

    def get_weights(self, *args, **kwargs):
        return self.model.get_weights(*args, **kwargs)

    def set_weights(self, *args, **kwargs):
        return self.model.set_weights(*args, **kwargs)

    def load_weights(self, *args, **kwargs):
        return self.model.load_weights(*args, **kwargs)

    def save_weights(self, *args, **kwargs):
        return self.model.save_weights(*args, **kwargs)

    def save_model(self, filename=None):
        """Persist weights; the default path is derived from NAME.

        FIX: `filename` was a required positional argument, so the
        zero-argument `self.save_model()` calls in run() raised TypeError.
        """
        model_name = filename or WEIGHTS_FOLDER_FORMAT.format(self.NAME)
        return self.model.save_weights(model_name)

    def reset_states(self):
        return self.model.reset_states()

    @staticmethod
    def build_model(train=False):
        """Factory for the underlying Keras model; subclasses must override.

        (FIX: the original declared the staticmethod parameter `self` and
        raised a bare Exception instead of NotImplementedError.)
        """
        raise NotImplementedError('subclasses must implement build_model()')

    def run(self, train, validate, save=False):
        """Fit on `train`, report accuracy on `validate`, optionally save."""
        train_x, train_y = train
        validate_x, validate_y = validate
        self.model.fit(
            train_x, train_y,
            nb_epoch=self.epoch, batch_size=self.batch_size,
            validation_data=(validate_x, validate_y),
            verbose=self.verbose)
        _, accuracy = self.model.evaluate(validate_x, validate_y, batch_size=32, verbose=0)
        if save:
            self.save_model()
        print('=== %s ===\nTest accuracy: %.2f%%' % (self.NAME, accuracy * 100))
class FC1Net(BaseNet):
    """Single dense sigmoid unit over 4096-dim inputs (logistic regression)."""

    NAME = 'FC1Net'

    @staticmethod
    def build_model(train=True):
        """Build the one-layer net; compile it only when *train* is true."""
        net = Sequential()
        net.add(Dense(1, input_dim=4096, activation='sigmoid'))
        if train:
            net.compile(loss='binary_crossentropy',
                        optimizer='rmsprop',
                        metrics=['accuracy'])
        return net
class FC4Net(BaseNet):
    """Four-layer fully-connected net: sigmoid input layer, two ReLU hidden
    layers (dropout after each), sigmoid output."""

    NAME = 'FC4Net'

    @staticmethod
    def build_model(train=True):
        """Build the stack; compile it only when *train* is true."""
        net = Sequential()
        net.add(Dense(256, input_dim=4096, init='uniform', activation='sigmoid'))
        net.add(Dropout(0.5))
        # Two identical ReLU blocks.
        for _ in range(2):
            net.add(Dense(256, activation='relu'))
            net.add(Dropout(0.5))
        net.add(Dense(1, activation='sigmoid'))
        if train:
            net.compile(loss='binary_crossentropy',
                        optimizer='rmsprop',
                        metrics=['accuracy'])
        return net
class MLPModel(BaseNet):
    """Three-hidden-layer sigmoid MLP trained with SGD; run() keeps the
    weights of the best validation epoch."""

    NAME = 'MLPModel'

    @staticmethod
    def build_model(train=True):
        """Build the 4096 -> 256x3 -> 1 sigmoid MLP; compile when training."""
        model = Sequential()
        model.add(Dense(256, input_dim=4096, init='uniform', activation='sigmoid'))
        model.add(Dropout(0.5))
        model.add(Dense(256, init='uniform', activation='sigmoid'))
        model.add(Dropout(0.5))
        model.add(Dense(256, init='uniform', activation='sigmoid'))
        model.add(Dropout(0.5))
        model.add(Dense(1, init='uniform', activation='sigmoid'))
        if train:
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            model.compile(
                loss='binary_crossentropy',
                optimizer=sgd,
                metrics=['accuracy'])
        return model

    def run(self, train, validate, save=False):
        """Train epoch-by-epoch, restore the best-val_acc weights, then
        evaluate on `validate` (and optionally save the weights)."""
        train_x, train_y = train
        validate_x, validate_y = validate
        model = self.model
        best_accuracy, best_epoch = 0.0, 0
        # FIX: best_W was only assigned when an epoch beat 0.0; if val_acc
        # never improved, set_weights(best_W) raised NameError. Seed it with
        # the initial weights instead.
        best_W = model.get_weights()
        for ep in range(self.epoch):
            H = model.fit(
                train_x, train_y,
                batch_size=self.batch_size, nb_epoch=1,
                validation_split=0.2, verbose=self.verbose)
            accuracy = H.history['val_acc'][0]
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                best_epoch = ep
                best_W = model.get_weights()
        model.reset_states()
        model.set_weights(best_W)
        self.model = model
        _, accuracy = self.model.evaluate(validate_x, validate_y, batch_size=32, verbose=0)
        print('=== %s ===\nTest accuracy: %.2f%%' % (self.NAME, accuracy * 100))
        print('best_accuracy: %f generated at epoch %d' % (best_accuracy, best_epoch))
        if save:
            # FIX: BaseNet.save_model requires a filename argument; pass None
            # explicitly so the default NAME-derived path is used (the
            # original zero-argument call raised TypeError).
            self.save_model(None)
class SaNet(BaseNet):
    """Embedding + two 1D convolutions + max-pooling over 4096-length
    sequences, ending in a single sigmoid unit."""

    NAME = 'SaNet'

    @staticmethod
    def build_model(train=True):
        """Build the conv net; compile it only when *train* is true."""
        net = Sequential()
        net.add(Embedding(2000, 256, input_length=4096))
        net.add(Dropout(0.25))
        net.add(Convolution1D(128, 10, activation='relu'))
        net.add(Convolution1D(128, 5, activation='relu'))
        net.add(MaxPooling1D(pool_length=4))
        net.add(Flatten())
        net.add(Dense(1, activation='sigmoid'))
        if train:
            net.compile(loss='binary_crossentropy',
                        optimizer='adam',
                        metrics=['accuracy'])
        return net
| {
"content_hash": "928488bf869d1449623984367cca6136",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 91,
"avg_line_length": 30.195652173913043,
"alnum_prop": 0.580093592512599,
"repo_name": "NTHU-CVLab/ActivityProps",
"id": "096f24234bb6b7e6e014c5e45543055122256146",
"size": "5556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "network/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "158942"
},
{
"name": "Python",
"bytes": "33538"
}
],
"symlink_target": ""
} |
from .sync import SyncWorker
from .commit_queue import CommitQueue
from .fetch import FetchWorker
| {
"content_hash": "6dfb36bb01139f609bc7205239096ad1",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 37,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.8367346938775511,
"repo_name": "ksmaheshkumar/gitfs",
"id": "3601eec6e7a83693932e493dd4293979ba57d84c",
"size": "676",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gitfs/worker/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2945"
},
{
"name": "Python",
"bytes": "249084"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
packages = [
'cullerton.agora',
]
requires = [
'sqlalchemy',
]
setup(
name='cullerton.agora',
version="0.0.2",
packages=find_packages(),
namespace_packages=['cullerton'],
install_requires=requires,
author='mike cullerton',
author_email='michaelc@cullerton.com',
description='A forum for ideas',
url='https://github.com/cullerton/cullerton.agora',
download_url='https://github.com/cullerton/cullerton.agora/tarball/0.0.2',
keywords=['academic', 'simple', 'example'],
classifiers=[],
entry_points="""\
[console_scripts]
initialize_agora_db = cullerton.agora.initialize_db:main
""",
package_data={
'': ['*.txt', '*.rst', '*.ipynb'],
},
)
| {
"content_hash": "810f58345653a4fe77a86f950d28da2f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 24.838709677419356,
"alnum_prop": 0.6311688311688312,
"repo_name": "cullerton/cullerton.agora",
"id": "9b3465f636d8273511035d8030e5acb1344a5178",
"size": "770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8434"
},
{
"name": "Python",
"bytes": "24118"
}
],
"symlink_target": ""
} |
import datetime
from typing import Dict, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
class CloudToDeviceProperties(_serialization.Model):
    """The IoT hub cloud-to-device messaging properties.

    :ivar max_delivery_count: The max delivery count for cloud-to-device
     messages in the device queue (validated to 1-100). See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
    :vartype max_delivery_count: int
    :ivar default_ttl_as_iso8601: The default time to live for cloud-to-device
     messages in the device queue. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
    :vartype default_ttl_as_iso8601: ~datetime.timedelta
    :ivar feedback: The properties of the feedback queue for cloud-to-device
     messages.
    :vartype feedback: ~azure.mgmt.iothub.v2017_01_19.models.FeedbackProperties
    """

    _validation = {
        "max_delivery_count": {"maximum": 100, "minimum": 1},
    }

    _attribute_map = {
        "max_delivery_count": {"key": "maxDeliveryCount", "type": "int"},
        "default_ttl_as_iso8601": {"key": "defaultTtlAsIso8601", "type": "duration"},
        "feedback": {"key": "feedback", "type": "FeedbackProperties"},
    }

    def __init__(
        self,
        *,
        max_delivery_count: Optional[int] = None,
        default_ttl_as_iso8601: Optional[datetime.timedelta] = None,
        feedback: Optional["_models.FeedbackProperties"] = None,
        **kwargs
    ):
        """
        :keyword max_delivery_count: The max delivery count for cloud-to-device
         messages in the device queue (1-100).
        :paramtype max_delivery_count: int
        :keyword default_ttl_as_iso8601: The default time to live for
         cloud-to-device messages in the device queue.
        :paramtype default_ttl_as_iso8601: ~datetime.timedelta
        :keyword feedback: The properties of the feedback queue for
         cloud-to-device messages.
        :paramtype feedback: ~azure.mgmt.iothub.v2017_01_19.models.FeedbackProperties
        """
        super().__init__(**kwargs)
        self.feedback = feedback
        self.default_ttl_as_iso8601 = default_ttl_as_iso8601
        self.max_delivery_count = max_delivery_count
class ErrorDetails(_serialization.Model):
    """Error details.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar code: The error code.
    :vartype code: str
    :ivar http_status_code: The HTTP status code.
    :vartype http_status_code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar details: The error details.
    :vartype details: str
    """

    _validation = {
        "code": {"readonly": True},
        "http_status_code": {"readonly": True},
        "message": {"readonly": True},
        "details": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "Code", "type": "str"},
        "http_status_code": {"key": "HttpStatusCode", "type": "str"},
        "message": {"key": "Message", "type": "str"},
        "details": {"key": "Details", "type": "str"},
    }

    def __init__(self, **kwargs):
        """All fields are read-only on the wire; the service fills them in
        responses, so they start as None."""
        super().__init__(**kwargs)
        self.details = None
        self.message = None
        self.http_status_code = None
        self.code = None
class EventHubConsumerGroupInfo(_serialization.Model):
    """The properties of the EventHubConsumerGroupInfo object.

    :ivar tags: The tags.
    :vartype tags: dict[str, str]
    :ivar id: The Event Hub-compatible consumer group identifier.
    :vartype id: str
    :ivar name: The Event Hub-compatible consumer group name.
    :vartype name: str
    """

    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        id: Optional[str] = None,  # pylint: disable=redefined-builtin
        name: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword tags: The tags.
        :paramtype tags: dict[str, str]
        :keyword id: The Event Hub-compatible consumer group identifier.
        :paramtype id: str
        :keyword name: The Event Hub-compatible consumer group name.
        :paramtype name: str
        """
        super().__init__(**kwargs)
        self.name = name
        self.id = id
        self.tags = tags
class EventHubConsumerGroupsListResult(_serialization.Model):
    """The JSON-serialized array of Event Hub-compatible consumer group names
    with a next link.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar value: The array of Event Hub-compatible consumer group names.
    :vartype value: list[str]
    :ivar next_link: The next link.
    :vartype next_link: str
    """

    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[str]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: Optional[List[str]] = None, **kwargs):
        """
        :keyword value: The array of Event Hub-compatible consumer group names.
        :paramtype value: list[str]
        """
        super().__init__(**kwargs)
        # next_link is read-only; the service populates it in responses.
        self.next_link = None
        self.value = value
class EventHubProperties(_serialization.Model):
    """The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar retention_time_in_days: The retention time for device-to-cloud messages in days. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
    :vartype retention_time_in_days: int
    :ivar partition_count: The number of partitions for receiving device-to-cloud messages in the
     Event Hub-compatible endpoint. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
    :vartype partition_count: int
    :ivar partition_ids: The partition ids in the Event Hub-compatible endpoint.
    :vartype partition_ids: list[str]
    :ivar path: The Event Hub-compatible name.
    :vartype path: str
    :ivar endpoint: The Event Hub-compatible endpoint.
    :vartype endpoint: str
    """

    # Validation metadata: readonly fields are never serialized on requests.
    _validation = {
        "partition_ids": {"readonly": True},
        "path": {"readonly": True},
        "endpoint": {"readonly": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "retention_time_in_days": {"key": "retentionTimeInDays", "type": "int"},
        "partition_count": {"key": "partitionCount", "type": "int"},
        "partition_ids": {"key": "partitionIds", "type": "[str]"},
        "path": {"key": "path", "type": "str"},
        "endpoint": {"key": "endpoint", "type": "str"},
    }

    def __init__(
        self, *, retention_time_in_days: Optional[int] = None, partition_count: Optional[int] = None, **kwargs
    ):
        """
        :keyword retention_time_in_days: The retention time for device-to-cloud messages in days. See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
        :paramtype retention_time_in_days: int
        :keyword partition_count: The number of partitions for receiving device-to-cloud messages in
         the Event Hub-compatible endpoint. See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
        :paramtype partition_count: int
        """
        super().__init__(**kwargs)
        self.retention_time_in_days = retention_time_in_days
        self.partition_count = partition_count
        self.partition_ids = None  # read-only; populated by the server
        self.path = None  # read-only; populated by the server
        self.endpoint = None  # read-only; populated by the server
class ExportDevicesRequest(_serialization.Model):
    """Use to provide parameters when requesting an export of all devices in the IoT hub.

    All required parameters must be populated in order to send to Azure.

    :ivar export_blob_container_uri: The export blob container URI. Required.
    :vartype export_blob_container_uri: str
    :ivar exclude_keys: The value indicating whether keys should be excluded during export.
     Required.
    :vartype exclude_keys: bool
    """

    # Validation metadata: required fields are checked before serialization.
    _validation = {
        "export_blob_container_uri": {"required": True},
        "exclude_keys": {"required": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model. Note the PascalCase wire keys for this payload.
    _attribute_map = {
        "export_blob_container_uri": {"key": "ExportBlobContainerUri", "type": "str"},
        "exclude_keys": {"key": "ExcludeKeys", "type": "bool"},
    }

    def __init__(self, *, export_blob_container_uri: str, exclude_keys: bool, **kwargs):
        """
        :keyword export_blob_container_uri: The export blob container URI. Required.
        :paramtype export_blob_container_uri: str
        :keyword exclude_keys: The value indicating whether keys should be excluded during export.
         Required.
        :paramtype exclude_keys: bool
        """
        super().__init__(**kwargs)
        self.export_blob_container_uri = export_blob_container_uri
        self.exclude_keys = exclude_keys
class FallbackRouteProperties(_serialization.Model):
    """The properties related to the fallback route based on which the IoT hub routes messages to the fallback endpoint.

    All required parameters must be populated in order to send to Azure.

    :ivar source: The source to which the routing rule is to be applied to. e.g. DeviceMessages.
     Required. Known values are: "DeviceMessages", "TwinChangeEvents", "DeviceLifecycleEvents", and
     "DeviceJobLifecycleEvents".
    :vartype source: str or ~azure.mgmt.iothub.v2017_01_19.models.RoutingSource
    :ivar condition: The condition which is evaluated in order to apply the fallback route. If the
     condition is not provided it will evaluate to true by default. For grammar, See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
    :vartype condition: str
    :ivar endpoint_names: The list of endpoints to which the messages that satisfy the condition
     are routed to. Currently only 1 endpoint is allowed. Required.
    :vartype endpoint_names: list[str]
    :ivar is_enabled: Used to specify whether the fallback route is enabled or not. Required.
    :vartype is_enabled: bool
    """

    # Validation metadata: endpoint_names must contain exactly one entry
    # (max_items == min_items == 1 mirrors the service constraint).
    _validation = {
        "source": {"required": True},
        "endpoint_names": {"required": True, "max_items": 1, "min_items": 1},
        "is_enabled": {"required": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "source": {"key": "source", "type": "str"},
        "condition": {"key": "condition", "type": "str"},
        "endpoint_names": {"key": "endpointNames", "type": "[str]"},
        "is_enabled": {"key": "isEnabled", "type": "bool"},
    }

    def __init__(
        self,
        *,
        source: Union[str, "_models.RoutingSource"],
        endpoint_names: List[str],
        is_enabled: bool,
        condition: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword source: The source to which the routing rule is to be applied to. e.g. DeviceMessages.
         Required. Known values are: "DeviceMessages", "TwinChangeEvents", "DeviceLifecycleEvents", and
         "DeviceJobLifecycleEvents".
        :paramtype source: str or ~azure.mgmt.iothub.v2017_01_19.models.RoutingSource
        :keyword condition: The condition which is evaluated in order to apply the fallback route. If
         the condition is not provided it will evaluate to true by default. For grammar, See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
        :paramtype condition: str
        :keyword endpoint_names: The list of endpoints to which the messages that satisfy the condition
         are routed to. Currently only 1 endpoint is allowed. Required.
        :paramtype endpoint_names: list[str]
        :keyword is_enabled: Used to specify whether the fallback route is enabled or not. Required.
        :paramtype is_enabled: bool
        """
        super().__init__(**kwargs)
        self.source = source
        self.condition = condition
        self.endpoint_names = endpoint_names
        self.is_enabled = is_enabled
class FeedbackProperties(_serialization.Model):
    """The properties of the feedback queue for cloud-to-device messages.

    :ivar lock_duration_as_iso8601: The lock duration for the feedback queue. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
    :vartype lock_duration_as_iso8601: ~datetime.timedelta
    :ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
     is expired by the IoT hub. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
    :vartype ttl_as_iso8601: ~datetime.timedelta
    :ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
     feedback queue. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
    :vartype max_delivery_count: int
    """

    # Validation metadata: max_delivery_count must lie in [1, 100].
    _validation = {
        "max_delivery_count": {"maximum": 100, "minimum": 1},
    }

    # Python attribute -> JSON wire key/type mapping; "duration" serializes a
    # datetime.timedelta as an ISO-8601 duration string.
    _attribute_map = {
        "lock_duration_as_iso8601": {"key": "lockDurationAsIso8601", "type": "duration"},
        "ttl_as_iso8601": {"key": "ttlAsIso8601", "type": "duration"},
        "max_delivery_count": {"key": "maxDeliveryCount", "type": "int"},
    }

    def __init__(
        self,
        *,
        lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
        ttl_as_iso8601: Optional[datetime.timedelta] = None,
        max_delivery_count: Optional[int] = None,
        **kwargs
    ):
        """
        :keyword lock_duration_as_iso8601: The lock duration for the feedback queue. See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
        :paramtype lock_duration_as_iso8601: ~datetime.timedelta
        :keyword ttl_as_iso8601: The period of time for which a message is available to consume before
         it is expired by the IoT hub. See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
        :paramtype ttl_as_iso8601: ~datetime.timedelta
        :keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message on
         the feedback queue. See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
        :paramtype max_delivery_count: int
        """
        super().__init__(**kwargs)
        self.lock_duration_as_iso8601 = lock_duration_as_iso8601
        self.ttl_as_iso8601 = ttl_as_iso8601
        self.max_delivery_count = max_delivery_count
class ImportDevicesRequest(_serialization.Model):
    """Use to provide parameters when requesting an import of all devices in the hub.

    All required parameters must be populated in order to send to Azure.

    :ivar input_blob_container_uri: The input blob container URI. Required.
    :vartype input_blob_container_uri: str
    :ivar output_blob_container_uri: The output blob container URI. Required.
    :vartype output_blob_container_uri: str
    """

    # Validation metadata: required fields are checked before serialization.
    _validation = {
        "input_blob_container_uri": {"required": True},
        "output_blob_container_uri": {"required": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model. Note the PascalCase wire keys for this payload.
    _attribute_map = {
        "input_blob_container_uri": {"key": "InputBlobContainerUri", "type": "str"},
        "output_blob_container_uri": {"key": "OutputBlobContainerUri", "type": "str"},
    }

    def __init__(self, *, input_blob_container_uri: str, output_blob_container_uri: str, **kwargs):
        """
        :keyword input_blob_container_uri: The input blob container URI. Required.
        :paramtype input_blob_container_uri: str
        :keyword output_blob_container_uri: The output blob container URI. Required.
        :paramtype output_blob_container_uri: str
        """
        super().__init__(**kwargs)
        self.input_blob_container_uri = input_blob_container_uri
        self.output_blob_container_uri = output_blob_container_uri
class IotHubCapacity(_serialization.Model):
    """IoT Hub capacity information.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar minimum: The minimum number of units.
    :vartype minimum: int
    :ivar maximum: The maximum number of units.
    :vartype maximum: int
    :ivar default: The default number of units.
    :vartype default: int
    :ivar scale_type: The type of the scaling enabled. Known values are: "Automatic", "Manual", and
     "None".
    :vartype scale_type: str or ~azure.mgmt.iothub.v2017_01_19.models.IotHubScaleType
    """

    # Validation metadata: all fields are read-only; "minimum" additionally
    # carries the service constraint that its value equals 1.
    _validation = {
        "minimum": {"readonly": True, "maximum": 1, "minimum": 1},
        "maximum": {"readonly": True},
        "default": {"readonly": True},
        "scale_type": {"readonly": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "minimum": {"key": "minimum", "type": "int"},
        "maximum": {"key": "maximum", "type": "int"},
        "default": {"key": "default", "type": "int"},
        "scale_type": {"key": "scaleType", "type": "str"},
    }

    def __init__(self, **kwargs):
        """All attributes are read-only and populated by the server."""
        super().__init__(**kwargs)
        self.minimum = None  # read-only; populated by the server
        self.maximum = None  # read-only; populated by the server
        self.default = None  # read-only; populated by the server
        self.scale_type = None  # read-only; populated by the server
class Resource(_serialization.Model):
    """The common properties of an Azure resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: The resource identifier.
    :vartype id: str
    :ivar name: The resource name.
    :vartype name: str
    :ivar type: The resource type.
    :vartype type: str
    :ivar location: The resource location. Required.
    :vartype location: str
    :ivar tags: The resource tags.
    :vartype tags: dict[str, str]
    """

    # Validation metadata: id/name/type are server-populated; "name" also
    # carries the IoT hub naming pattern (alphanumeric/hyphen, 3-50 chars,
    # not all digits, no leading hyphen, alphanumeric last char).
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True, "pattern": r"^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$"},
        "type": {"readonly": True},
        "location": {"required": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):
        """
        :keyword location: The resource location. Required.
        :paramtype location: str
        :keyword tags: The resource tags.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        self.id = None  # read-only; populated by the server
        self.name = None  # read-only; populated by the server
        self.type = None  # read-only; populated by the server
        self.location = location
        self.tags = tags
class IotHubDescription(Resource):
    """The description of the IoT hub.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: The resource identifier.
    :vartype id: str
    :ivar name: The resource name.
    :vartype name: str
    :ivar type: The resource type.
    :vartype type: str
    :ivar location: The resource location. Required.
    :vartype location: str
    :ivar tags: The resource tags.
    :vartype tags: dict[str, str]
    :ivar subscriptionid: The subscription identifier. Required.
    :vartype subscriptionid: str
    :ivar resourcegroup: The name of the resource group that contains the IoT hub. A resource group
     name uniquely identifies the resource group within the subscription. Required.
    :vartype resourcegroup: str
    :ivar etag: The Etag field is *not* required. If it is provided in the response body, it must
     also be provided as a header per the normal ETag convention.
    :vartype etag: str
    :ivar properties: The properties of an IoT hub.
    :vartype properties: ~azure.mgmt.iothub.v2017_01_19.models.IotHubProperties
    :ivar sku: Information about the SKU of the IoT hub. Required.
    :vartype sku: ~azure.mgmt.iothub.v2017_01_19.models.IotHubSkuInfo
    """

    # Validation metadata: restates the inherited Resource rules and adds
    # this type's own required fields (the dicts are not merged at runtime,
    # so the base entries must be repeated here).
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True, "pattern": r"^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$"},
        "type": {"readonly": True},
        "location": {"required": True},
        "subscriptionid": {"required": True},
        "resourcegroup": {"required": True},
        "sku": {"required": True},
    }

    # Python attribute -> JSON wire key/type mapping (base fields repeated,
    # see note on _validation above).
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "subscriptionid": {"key": "subscriptionid", "type": "str"},
        "resourcegroup": {"key": "resourcegroup", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "properties": {"key": "properties", "type": "IotHubProperties"},
        "sku": {"key": "sku", "type": "IotHubSkuInfo"},
    }

    def __init__(
        self,
        *,
        location: str,
        subscriptionid: str,
        resourcegroup: str,
        sku: "_models.IotHubSkuInfo",
        tags: Optional[Dict[str, str]] = None,
        etag: Optional[str] = None,
        properties: Optional["_models.IotHubProperties"] = None,
        **kwargs
    ):
        """
        :keyword location: The resource location. Required.
        :paramtype location: str
        :keyword tags: The resource tags.
        :paramtype tags: dict[str, str]
        :keyword subscriptionid: The subscription identifier. Required.
        :paramtype subscriptionid: str
        :keyword resourcegroup: The name of the resource group that contains the IoT hub. A resource
         group name uniquely identifies the resource group within the subscription. Required.
        :paramtype resourcegroup: str
        :keyword etag: The Etag field is *not* required. If it is provided in the response body, it
         must also be provided as a header per the normal ETag convention.
        :paramtype etag: str
        :keyword properties: The properties of an IoT hub.
        :paramtype properties: ~azure.mgmt.iothub.v2017_01_19.models.IotHubProperties
        :keyword sku: Information about the SKU of the IoT hub. Required.
        :paramtype sku: ~azure.mgmt.iothub.v2017_01_19.models.IotHubSkuInfo
        """
        # Resource.__init__ sets id/name/type to None and stores location/tags.
        super().__init__(location=location, tags=tags, **kwargs)
        self.subscriptionid = subscriptionid
        self.resourcegroup = resourcegroup
        self.etag = etag
        self.properties = properties
        self.sku = sku
class IotHubDescriptionListResult(_serialization.Model):
    """The JSON-serialized array of IotHubDescription objects with a next link.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The array of IotHubDescription objects.
    :vartype value: list[~azure.mgmt.iothub.v2017_01_19.models.IotHubDescription]
    :ivar next_link: The next link.
    :vartype next_link: str
    """

    # Validation metadata: readonly fields are never serialized on requests.
    _validation = {
        "next_link": {"readonly": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "value": {"key": "value", "type": "[IotHubDescription]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: Optional[List["_models.IotHubDescription"]] = None, **kwargs):
        """
        :keyword value: The array of IotHubDescription objects.
        :paramtype value: list[~azure.mgmt.iothub.v2017_01_19.models.IotHubDescription]
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = None  # read-only; populated by the server
class IotHubNameAvailabilityInfo(_serialization.Model):
    """The properties indicating whether a given IoT hub name is available.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name_available: The value which indicates whether the provided name is available.
    :vartype name_available: bool
    :ivar reason: The reason for unavailability. Known values are: "Invalid" and "AlreadyExists".
    :vartype reason: str or ~azure.mgmt.iothub.v2017_01_19.models.IotHubNameUnavailabilityReason
    :ivar message: The detailed reason message.
    :vartype message: str
    """

    # Validation metadata: readonly fields are never serialized on requests.
    _validation = {
        "name_available": {"readonly": True},
        "reason": {"readonly": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "name_available": {"key": "nameAvailable", "type": "bool"},
        "reason": {"key": "reason", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, *, message: Optional[str] = None, **kwargs):
        """
        :keyword message: The detailed reason message.
        :paramtype message: str
        """
        super().__init__(**kwargs)
        self.name_available = None  # read-only; populated by the server
        self.reason = None  # read-only; populated by the server
        self.message = message
class IotHubProperties(_serialization.Model):  # pylint: disable=too-many-instance-attributes
    """The properties of an IoT hub.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar authorization_policies: The shared access policies you can use to secure a connection to
     the IoT hub.
    :vartype authorization_policies:
     list[~azure.mgmt.iothub.v2017_01_19.models.SharedAccessSignatureAuthorizationRule]
    :ivar ip_filter_rules: The IP filter rules.
    :vartype ip_filter_rules: list[~azure.mgmt.iothub.v2017_01_19.models.IpFilterRule]
    :ivar provisioning_state: The provisioning state.
    :vartype provisioning_state: str
    :ivar host_name: The name of the host.
    :vartype host_name: str
    :ivar event_hub_endpoints: The Event Hub-compatible endpoint properties. The possible keys to
     this dictionary are events and operationsMonitoringEvents. Both of these keys have to be
     present in the dictionary while making create or update calls for the IoT hub.
    :vartype event_hub_endpoints: dict[str,
     ~azure.mgmt.iothub.v2017_01_19.models.EventHubProperties]
    :ivar routing: The routing related properties of the IoT hub. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
    :vartype routing: ~azure.mgmt.iothub.v2017_01_19.models.RoutingProperties
    :ivar storage_endpoints: The list of Azure Storage endpoints where you can upload files.
     Currently you can configure only one Azure Storage account and that MUST have its key as
     $default. Specifying more than one storage account causes an error to be thrown. Not specifying
     a value for this property when the enableFileUploadNotifications property is set to True,
     causes an error to be thrown.
    :vartype storage_endpoints: dict[str,
     ~azure.mgmt.iothub.v2017_01_19.models.StorageEndpointProperties]
    :ivar messaging_endpoints: The messaging endpoint properties for the file upload notification
     queue.
    :vartype messaging_endpoints: dict[str,
     ~azure.mgmt.iothub.v2017_01_19.models.MessagingEndpointProperties]
    :ivar enable_file_upload_notifications: If True, file upload notifications are enabled.
    :vartype enable_file_upload_notifications: bool
    :ivar cloud_to_device: The IoT hub cloud-to-device messaging properties.
    :vartype cloud_to_device: ~azure.mgmt.iothub.v2017_01_19.models.CloudToDeviceProperties
    :ivar comments: Comments.
    :vartype comments: str
    :ivar operations_monitoring_properties: The operations monitoring properties for the IoT hub.
     The possible keys to the dictionary are Connections, DeviceTelemetry, C2DCommands,
     DeviceIdentityOperations, FileUploadOperations, Routes, D2CTwinOperations, C2DTwinOperations,
     TwinQueries, JobsOperations, DirectMethods.
    :vartype operations_monitoring_properties:
     ~azure.mgmt.iothub.v2017_01_19.models.OperationsMonitoringProperties
    :ivar features: The capabilities and features enabled for the IoT hub. Known values are: "None"
     and "DeviceManagement".
    :vartype features: str or ~azure.mgmt.iothub.v2017_01_19.models.Capabilities
    """

    # Validation metadata: readonly fields are never serialized on requests.
    _validation = {
        "provisioning_state": {"readonly": True},
        "host_name": {"readonly": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model ("[X]" = list of X, "{X}" = dict of str -> X).
    _attribute_map = {
        "authorization_policies": {"key": "authorizationPolicies", "type": "[SharedAccessSignatureAuthorizationRule]"},
        "ip_filter_rules": {"key": "ipFilterRules", "type": "[IpFilterRule]"},
        "provisioning_state": {"key": "provisioningState", "type": "str"},
        "host_name": {"key": "hostName", "type": "str"},
        "event_hub_endpoints": {"key": "eventHubEndpoints", "type": "{EventHubProperties}"},
        "routing": {"key": "routing", "type": "RoutingProperties"},
        "storage_endpoints": {"key": "storageEndpoints", "type": "{StorageEndpointProperties}"},
        "messaging_endpoints": {"key": "messagingEndpoints", "type": "{MessagingEndpointProperties}"},
        "enable_file_upload_notifications": {"key": "enableFileUploadNotifications", "type": "bool"},
        "cloud_to_device": {"key": "cloudToDevice", "type": "CloudToDeviceProperties"},
        "comments": {"key": "comments", "type": "str"},
        "operations_monitoring_properties": {
            "key": "operationsMonitoringProperties",
            "type": "OperationsMonitoringProperties",
        },
        "features": {"key": "features", "type": "str"},
    }

    def __init__(
        self,
        *,
        authorization_policies: Optional[List["_models.SharedAccessSignatureAuthorizationRule"]] = None,
        ip_filter_rules: Optional[List["_models.IpFilterRule"]] = None,
        event_hub_endpoints: Optional[Dict[str, "_models.EventHubProperties"]] = None,
        routing: Optional["_models.RoutingProperties"] = None,
        storage_endpoints: Optional[Dict[str, "_models.StorageEndpointProperties"]] = None,
        messaging_endpoints: Optional[Dict[str, "_models.MessagingEndpointProperties"]] = None,
        enable_file_upload_notifications: Optional[bool] = None,
        cloud_to_device: Optional["_models.CloudToDeviceProperties"] = None,
        comments: Optional[str] = None,
        operations_monitoring_properties: Optional["_models.OperationsMonitoringProperties"] = None,
        features: Optional[Union[str, "_models.Capabilities"]] = None,
        **kwargs
    ):
        """
        :keyword authorization_policies: The shared access policies you can use to secure a connection
         to the IoT hub.
        :paramtype authorization_policies:
         list[~azure.mgmt.iothub.v2017_01_19.models.SharedAccessSignatureAuthorizationRule]
        :keyword ip_filter_rules: The IP filter rules.
        :paramtype ip_filter_rules: list[~azure.mgmt.iothub.v2017_01_19.models.IpFilterRule]
        :keyword event_hub_endpoints: The Event Hub-compatible endpoint properties. The possible keys
         to this dictionary are events and operationsMonitoringEvents. Both of these keys have to be
         present in the dictionary while making create or update calls for the IoT hub.
        :paramtype event_hub_endpoints: dict[str,
         ~azure.mgmt.iothub.v2017_01_19.models.EventHubProperties]
        :keyword routing: The routing related properties of the IoT hub. See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
        :paramtype routing: ~azure.mgmt.iothub.v2017_01_19.models.RoutingProperties
        :keyword storage_endpoints: The list of Azure Storage endpoints where you can upload files.
         Currently you can configure only one Azure Storage account and that MUST have its key as
         $default. Specifying more than one storage account causes an error to be thrown. Not specifying
         a value for this property when the enableFileUploadNotifications property is set to True,
         causes an error to be thrown.
        :paramtype storage_endpoints: dict[str,
         ~azure.mgmt.iothub.v2017_01_19.models.StorageEndpointProperties]
        :keyword messaging_endpoints: The messaging endpoint properties for the file upload
         notification queue.
        :paramtype messaging_endpoints: dict[str,
         ~azure.mgmt.iothub.v2017_01_19.models.MessagingEndpointProperties]
        :keyword enable_file_upload_notifications: If True, file upload notifications are enabled.
        :paramtype enable_file_upload_notifications: bool
        :keyword cloud_to_device: The IoT hub cloud-to-device messaging properties.
        :paramtype cloud_to_device: ~azure.mgmt.iothub.v2017_01_19.models.CloudToDeviceProperties
        :keyword comments: Comments.
        :paramtype comments: str
        :keyword operations_monitoring_properties: The operations monitoring properties for the IoT
         hub. The possible keys to the dictionary are Connections, DeviceTelemetry, C2DCommands,
         DeviceIdentityOperations, FileUploadOperations, Routes, D2CTwinOperations, C2DTwinOperations,
         TwinQueries, JobsOperations, DirectMethods.
        :paramtype operations_monitoring_properties:
         ~azure.mgmt.iothub.v2017_01_19.models.OperationsMonitoringProperties
        :keyword features: The capabilities and features enabled for the IoT hub. Known values are:
         "None" and "DeviceManagement".
        :paramtype features: str or ~azure.mgmt.iothub.v2017_01_19.models.Capabilities
        """
        super().__init__(**kwargs)
        self.authorization_policies = authorization_policies
        self.ip_filter_rules = ip_filter_rules
        self.provisioning_state = None  # read-only; populated by the server
        self.host_name = None  # read-only; populated by the server
        self.event_hub_endpoints = event_hub_endpoints
        self.routing = routing
        self.storage_endpoints = storage_endpoints
        self.messaging_endpoints = messaging_endpoints
        self.enable_file_upload_notifications = enable_file_upload_notifications
        self.cloud_to_device = cloud_to_device
        self.comments = comments
        self.operations_monitoring_properties = operations_monitoring_properties
        self.features = features
class IotHubQuotaMetricInfo(_serialization.Model):
    """Quota metrics properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: The name of the quota metric.
    :vartype name: str
    :ivar current_value: The current value for the quota metric.
    :vartype current_value: int
    :ivar max_value: The maximum value of the quota metric.
    :vartype max_value: int
    """

    # Validation metadata: all fields are read-only.
    _validation = {
        "name": {"readonly": True},
        "current_value": {"readonly": True},
        "max_value": {"readonly": True},
    }

    # Python attribute -> JSON wire key/type mapping. Note the PascalCase
    # wire keys for this payload.
    _attribute_map = {
        "name": {"key": "Name", "type": "str"},
        "current_value": {"key": "CurrentValue", "type": "int"},
        "max_value": {"key": "MaxValue", "type": "int"},
    }

    def __init__(self, **kwargs):
        """All attributes are read-only and populated by the server."""
        super().__init__(**kwargs)
        self.name = None  # read-only; populated by the server
        self.current_value = None  # read-only; populated by the server
        self.max_value = None  # read-only; populated by the server
class IotHubQuotaMetricInfoListResult(_serialization.Model):
    """The JSON-serialized array of IotHubQuotaMetricInfo objects with a next link.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The array of quota metrics objects.
    :vartype value: list[~azure.mgmt.iothub.v2017_01_19.models.IotHubQuotaMetricInfo]
    :ivar next_link: The next link.
    :vartype next_link: str
    """

    # Validation metadata: readonly fields are never serialized on requests.
    _validation = {
        "next_link": {"readonly": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "value": {"key": "value", "type": "[IotHubQuotaMetricInfo]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: Optional[List["_models.IotHubQuotaMetricInfo"]] = None, **kwargs):
        """
        :keyword value: The array of quota metrics objects.
        :paramtype value: list[~azure.mgmt.iothub.v2017_01_19.models.IotHubQuotaMetricInfo]
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = None  # read-only; populated by the server
class IotHubSkuDescription(_serialization.Model):
    """SKU properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar resource_type: The type of the resource.
    :vartype resource_type: str
    :ivar sku: Information about the SKU of the IoT hub. Required.
    :vartype sku: ~azure.mgmt.iothub.v2017_01_19.models.IotHubSkuInfo
    :ivar capacity: IoT Hub capacity information. Required.
    :vartype capacity: ~azure.mgmt.iothub.v2017_01_19.models.IotHubCapacity
    """

    # Validation metadata: resource_type is server-populated; sku and
    # capacity are required on requests.
    _validation = {
        "resource_type": {"readonly": True},
        "sku": {"required": True},
        "capacity": {"required": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "resource_type": {"key": "resourceType", "type": "str"},
        "sku": {"key": "sku", "type": "IotHubSkuInfo"},
        "capacity": {"key": "capacity", "type": "IotHubCapacity"},
    }

    def __init__(self, *, sku: "_models.IotHubSkuInfo", capacity: "_models.IotHubCapacity", **kwargs):
        """
        :keyword sku: Information about the SKU of the IoT hub. Required.
        :paramtype sku: ~azure.mgmt.iothub.v2017_01_19.models.IotHubSkuInfo
        :keyword capacity: IoT Hub capacity information. Required.
        :paramtype capacity: ~azure.mgmt.iothub.v2017_01_19.models.IotHubCapacity
        """
        super().__init__(**kwargs)
        self.resource_type = None  # read-only; populated by the server
        self.sku = sku
        self.capacity = capacity
class IotHubSkuDescriptionListResult(_serialization.Model):
    """The JSON-serialized array of IotHubSkuDescription objects with a next link.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The array of IotHubSkuDescription.
    :vartype value: list[~azure.mgmt.iothub.v2017_01_19.models.IotHubSkuDescription]
    :ivar next_link: The next link.
    :vartype next_link: str
    """

    # Validation metadata: readonly fields are never serialized on requests.
    _validation = {
        "next_link": {"readonly": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "value": {"key": "value", "type": "[IotHubSkuDescription]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: Optional[List["_models.IotHubSkuDescription"]] = None, **kwargs):
        """
        :keyword value: The array of IotHubSkuDescription.
        :paramtype value: list[~azure.mgmt.iothub.v2017_01_19.models.IotHubSkuDescription]
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = None  # read-only; populated by the server
class IotHubSkuInfo(_serialization.Model):
    """Information about the SKU of the IoT hub.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: The name of the SKU. Required. Known values are: "F1", "S1", "S2", and "S3".
    :vartype name: str or ~azure.mgmt.iothub.v2017_01_19.models.IotHubSku
    :ivar tier: The billing tier for the IoT hub. Known values are: "Free" and "Standard".
    :vartype tier: str or ~azure.mgmt.iothub.v2017_01_19.models.IotHubSkuTier
    :ivar capacity: The number of provisioned IoT Hub units. See:
     https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits. Required.
    :vartype capacity: int
    """

    # Validation metadata: name/capacity are required on requests; tier is
    # derived by the service and therefore read-only.
    _validation = {
        "name": {"required": True},
        "tier": {"readonly": True},
        "capacity": {"required": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "tier": {"key": "tier", "type": "str"},
        "capacity": {"key": "capacity", "type": "int"},
    }

    def __init__(self, *, name: Union[str, "_models.IotHubSku"], capacity: int, **kwargs):
        """
        :keyword name: The name of the SKU. Required. Known values are: "F1", "S1", "S2", and "S3".
        :paramtype name: str or ~azure.mgmt.iothub.v2017_01_19.models.IotHubSku
        :keyword capacity: The number of provisioned IoT Hub units. See:
         https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits. Required.
        :paramtype capacity: int
        """
        super().__init__(**kwargs)
        self.name = name
        self.tier = None  # read-only; populated by the server
        self.capacity = capacity
class IpFilterRule(_serialization.Model):
    """The IP filter rules for the IoT hub.

    All required parameters must be populated in order to send to Azure.

    :ivar filter_name: The name of the IP filter rule. Required.
    :vartype filter_name: str
    :ivar action: The desired action for requests captured by this rule. Required. Known values
     are: "Accept" and "Reject".
    :vartype action: str or ~azure.mgmt.iothub.v2017_01_19.models.IpFilterActionType
    :ivar ip_mask: A string that contains the IP address range in CIDR notation for the rule.
     Required.
    :vartype ip_mask: str
    """

    # Validation metadata: every field is required on requests.
    _validation = {
        "filter_name": {"required": True},
        "action": {"required": True},
        "ip_mask": {"required": True},
    }

    # Python attribute -> JSON wire key/type mapping consumed by the base
    # serialization Model.
    _attribute_map = {
        "filter_name": {"key": "filterName", "type": "str"},
        "action": {"key": "action", "type": "str"},
        "ip_mask": {"key": "ipMask", "type": "str"},
    }

    def __init__(self, *, filter_name: str, action: Union[str, "_models.IpFilterActionType"], ip_mask: str, **kwargs):
        """
        :keyword filter_name: The name of the IP filter rule. Required.
        :paramtype filter_name: str
        :keyword action: The desired action for requests captured by this rule. Required. Known values
         are: "Accept" and "Reject".
        :paramtype action: str or ~azure.mgmt.iothub.v2017_01_19.models.IpFilterActionType
        :keyword ip_mask: A string that contains the IP address range in CIDR notation for the rule.
         Required.
        :paramtype ip_mask: str
        """
        super().__init__(**kwargs)
        self.filter_name = filter_name
        self.action = action
        self.ip_mask = ip_mask
class JobResponse(_serialization.Model):
    """Properties of a Job Response object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar job_id: The job identifier.
    :vartype job_id: str
    :ivar start_time_utc: The start time of the job.
    :vartype start_time_utc: ~datetime.datetime
    :ivar end_time_utc: The time the job stopped processing.
    :vartype end_time_utc: ~datetime.datetime
    :ivar type: The type of the job. Known values are: "unknown", "export", "import", "backup",
     "readDeviceProperties", "writeDeviceProperties", "updateDeviceConfiguration", "rebootDevice",
     "factoryResetDevice", and "firmwareUpdate".
    :vartype type: str or ~azure.mgmt.iothub.v2017_01_19.models.JobType
    :ivar status: The status of the job. Known values are: "unknown", "enqueued", "running",
     "completed", "failed", and "cancelled".
    :vartype status: str or ~azure.mgmt.iothub.v2017_01_19.models.JobStatus
    :ivar failure_reason: If status == failed, this string containing the reason for the failure.
    :vartype failure_reason: str
    :ivar status_message: The status message for the job.
    :vartype status_message: str
    :ivar parent_job_id: The job identifier of the parent job, if any.
    :vartype parent_job_id: str
    """

    # Every property is server-populated; clients never send any of them.
    _validation = {
        "job_id": {"readonly": True},
        "start_time_utc": {"readonly": True},
        "end_time_utc": {"readonly": True},
        "type": {"readonly": True},
        "status": {"readonly": True},
        "failure_reason": {"readonly": True},
        "status_message": {"readonly": True},
        "parent_job_id": {"readonly": True},
    }

    # Python attribute name -> wire (JSON) key and serialized type.
    # The two timestamps travel in RFC 1123 date format.
    _attribute_map = {
        "job_id": {"key": "jobId", "type": "str"},
        "start_time_utc": {"key": "startTimeUtc", "type": "rfc-1123"},
        "end_time_utc": {"key": "endTimeUtc", "type": "rfc-1123"},
        "type": {"key": "type", "type": "str"},
        "status": {"key": "status", "type": "str"},
        "failure_reason": {"key": "failureReason", "type": "str"},
        "status_message": {"key": "statusMessage", "type": "str"},
        "parent_job_id": {"key": "parentJobId", "type": "str"},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        # All properties are read-only, so each starts out unset on the client.
        for _attr in (
            "job_id",
            "start_time_utc",
            "end_time_utc",
            "type",
            "status",
            "failure_reason",
            "status_message",
            "parent_job_id",
        ):
            setattr(self, _attr, None)
class JobResponseListResult(_serialization.Model):
    """A JSON-serialized array of JobResponse objects together with a next link.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The array of JobResponse objects.
    :vartype value: list[~azure.mgmt.iothub.v2017_01_19.models.JobResponse]
    :ivar next_link: The next link.
    :vartype next_link: str
    """

    # next_link is paging state produced by the service; clients never set it.
    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[JobResponse]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.JobResponse"]] = None,
        **kwargs
    ):
        """
        :keyword value: The array of JobResponse objects.
        :paramtype value: list[~azure.mgmt.iothub.v2017_01_19.models.JobResponse]
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = None
class MessagingEndpointProperties(_serialization.Model):
    """Properties of the messaging endpoints used by this IoT hub.

    :ivar lock_duration_as_iso8601: The lock duration. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
    :vartype lock_duration_as_iso8601: ~datetime.timedelta
    :ivar ttl_as_iso8601: The period of time for which a message is available to consume
     before it is expired by the IoT hub. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
    :vartype ttl_as_iso8601: ~datetime.timedelta
    :ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message.
     See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
    :vartype max_delivery_count: int
    """

    # The service only accepts delivery counts in the 1..100 range.
    _validation = {
        "max_delivery_count": {"maximum": 100, "minimum": 1},
    }

    # The two time spans are serialized as ISO 8601 durations.
    _attribute_map = {
        "lock_duration_as_iso8601": {"key": "lockDurationAsIso8601", "type": "duration"},
        "ttl_as_iso8601": {"key": "ttlAsIso8601", "type": "duration"},
        "max_delivery_count": {"key": "maxDeliveryCount", "type": "int"},
    }

    def __init__(
        self,
        *,
        lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
        ttl_as_iso8601: Optional[datetime.timedelta] = None,
        max_delivery_count: Optional[int] = None,
        **kwargs
    ):
        """
        :keyword lock_duration_as_iso8601: The lock duration. See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
        :paramtype lock_duration_as_iso8601: ~datetime.timedelta
        :keyword ttl_as_iso8601: The period of time for which a message is available to consume
         before it is expired by the IoT hub. See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
        :paramtype ttl_as_iso8601: ~datetime.timedelta
        :keyword max_delivery_count: The number of times the IoT hub attempts to deliver a
         message. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
        :paramtype max_delivery_count: int
        """
        super().__init__(**kwargs)
        self.lock_duration_as_iso8601 = lock_duration_as_iso8601
        self.ttl_as_iso8601 = ttl_as_iso8601
        self.max_delivery_count = max_delivery_count
class OperationInputs(_serialization.Model):
    """Input values for an operation.

    All required parameters must be populated in order to send to Azure.

    :ivar name: The name of the IoT hub to check. Required.
    :vartype name: str
    """

    _validation = {
        "name": {"required": True},
    }

    # NOTE: the wire key is capitalized ("Name"), unlike most other models here.
    _attribute_map = {
        "name": {"key": "Name", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: str,
        **kwargs
    ):
        """
        :keyword name: The name of the IoT hub to check. Required.
        :paramtype name: str
        """
        super().__init__(**kwargs)
        self.name = name
class OperationsMonitoringProperties(_serialization.Model):
    """The operations monitoring properties for the IoT hub. The possible keys to the dictionary are Connections, DeviceTelemetry, C2DCommands, DeviceIdentityOperations, FileUploadOperations, Routes, D2CTwinOperations, C2DTwinOperations, TwinQueries, JobsOperations, DirectMethods.

    :ivar events: Dictionary of :code:`<OperationMonitoringLevel>`.
    :vartype events: dict[str, str or
     ~azure.mgmt.iothub.v2017_01_19.models.OperationMonitoringLevel]
    """

    # Serialized as a plain string-valued dictionary.
    _attribute_map = {
        "events": {"key": "events", "type": "{str}"},
    }

    def __init__(
        self,
        *,
        events: Optional[Dict[str, Union[str, "_models.OperationMonitoringLevel"]]] = None,
        **kwargs
    ):
        """
        :keyword events: Dictionary of :code:`<OperationMonitoringLevel>`.
        :paramtype events: dict[str, str or
         ~azure.mgmt.iothub.v2017_01_19.models.OperationMonitoringLevel]
        """
        super().__init__(**kwargs)
        self.events = events
class RegistryStatistics(_serialization.Model):
    """Statistics about the identity registry.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar total_device_count: The total count of devices in the identity registry.
    :vartype total_device_count: int
    :ivar enabled_device_count: The count of enabled devices in the identity registry.
    :vartype enabled_device_count: int
    :ivar disabled_device_count: The count of disabled devices in the identity registry.
    :vartype disabled_device_count: int
    """

    # All counters are computed by the service; clients never send them.
    _validation = {
        "total_device_count": {"readonly": True},
        "enabled_device_count": {"readonly": True},
        "disabled_device_count": {"readonly": True},
    }

    _attribute_map = {
        "total_device_count": {"key": "totalDeviceCount", "type": "int"},
        "enabled_device_count": {"key": "enabledDeviceCount", "type": "int"},
        "disabled_device_count": {"key": "disabledDeviceCount", "type": "int"},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        # Read-only counters start out unset on the client side.
        for _attr in ("total_device_count", "enabled_device_count", "disabled_device_count"):
            setattr(self, _attr, None)
class RouteProperties(_serialization.Model):
    """Properties of a routing rule that your IoT hub uses to route messages to endpoints.

    All required parameters must be populated in order to send to Azure.

    :ivar name: The name of the route. The name can only include alphanumeric characters,
     periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
     Required.
    :vartype name: str
    :ivar source: The source that the routing rule is to be applied to, such as DeviceMessages.
     Required. Known values are: "DeviceMessages", "TwinChangeEvents", "DeviceLifecycleEvents",
     and "DeviceJobLifecycleEvents".
    :vartype source: str or ~azure.mgmt.iothub.v2017_01_19.models.RoutingSource
    :ivar condition: The condition that is evaluated to apply the routing rule. If no condition
     is provided, it evaluates to true by default. For grammar, See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
    :vartype condition: str
    :ivar endpoint_names: The list of endpoints to which messages that satisfy the condition
     are routed. Currently only one endpoint is allowed. Required.
    :vartype endpoint_names: list[str]
    :ivar is_enabled: Used to specify whether a route is enabled. Required.
    :vartype is_enabled: bool
    """

    # The name pattern enforces the alphanumeric/period/underscore/hyphen, 1-64 char rule;
    # endpoint_names is constrained to exactly one entry by the service today.
    _validation = {
        "name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
        "source": {"required": True},
        "endpoint_names": {"required": True, "max_items": 1, "min_items": 1},
        "is_enabled": {"required": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "source": {"key": "source", "type": "str"},
        "condition": {"key": "condition", "type": "str"},
        "endpoint_names": {"key": "endpointNames", "type": "[str]"},
        "is_enabled": {"key": "isEnabled", "type": "bool"},
    }

    def __init__(
        self,
        *,
        name: str,
        source: Union[str, "_models.RoutingSource"],
        endpoint_names: List[str],
        is_enabled: bool,
        condition: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: The name of the route. The name can only include alphanumeric characters,
         periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
         Required.
        :paramtype name: str
        :keyword source: The source that the routing rule is to be applied to, such as
         DeviceMessages. Required. Known values are: "DeviceMessages", "TwinChangeEvents",
         "DeviceLifecycleEvents", and "DeviceJobLifecycleEvents".
        :paramtype source: str or ~azure.mgmt.iothub.v2017_01_19.models.RoutingSource
        :keyword condition: The condition that is evaluated to apply the routing rule. If no
         condition is provided, it evaluates to true by default. For grammar, See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
        :paramtype condition: str
        :keyword endpoint_names: The list of endpoints to which messages that satisfy the
         condition are routed. Currently only one endpoint is allowed. Required.
        :paramtype endpoint_names: list[str]
        :keyword is_enabled: Used to specify whether a route is enabled. Required.
        :paramtype is_enabled: bool
        """
        super().__init__(**kwargs)
        self.name = name
        self.source = source
        self.condition = condition
        self.endpoint_names = endpoint_names
        self.is_enabled = is_enabled
class RoutingEndpoints(_serialization.Model):
    """The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.

    :ivar service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
     messages to, based on the routing rules.
    :vartype service_bus_queues:
     list[~azure.mgmt.iothub.v2017_01_19.models.RoutingServiceBusQueueEndpointProperties]
    :ivar service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes
     the messages to, based on the routing rules.
    :vartype service_bus_topics:
     list[~azure.mgmt.iothub.v2017_01_19.models.RoutingServiceBusTopicEndpointProperties]
    :ivar event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based
     on the routing rules. This list does not include the built-in Event Hubs endpoint.
    :vartype event_hubs: list[~azure.mgmt.iothub.v2017_01_19.models.RoutingEventHubProperties]
    """

    # One wire key per supported custom endpoint type.
    _attribute_map = {
        "service_bus_queues": {"key": "serviceBusQueues", "type": "[RoutingServiceBusQueueEndpointProperties]"},
        "service_bus_topics": {"key": "serviceBusTopics", "type": "[RoutingServiceBusTopicEndpointProperties]"},
        "event_hubs": {"key": "eventHubs", "type": "[RoutingEventHubProperties]"},
    }

    def __init__(
        self,
        *,
        service_bus_queues: Optional[List["_models.RoutingServiceBusQueueEndpointProperties"]] = None,
        service_bus_topics: Optional[List["_models.RoutingServiceBusTopicEndpointProperties"]] = None,
        event_hubs: Optional[List["_models.RoutingEventHubProperties"]] = None,
        **kwargs
    ):
        """
        :keyword service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes
         the messages to, based on the routing rules.
        :paramtype service_bus_queues:
         list[~azure.mgmt.iothub.v2017_01_19.models.RoutingServiceBusQueueEndpointProperties]
        :keyword service_bus_topics: The list of Service Bus topic endpoints that the IoT hub
         routes the messages to, based on the routing rules.
        :paramtype service_bus_topics:
         list[~azure.mgmt.iothub.v2017_01_19.models.RoutingServiceBusTopicEndpointProperties]
        :keyword event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to,
         based on the routing rules. This list does not include the built-in Event Hubs endpoint.
        :paramtype event_hubs:
         list[~azure.mgmt.iothub.v2017_01_19.models.RoutingEventHubProperties]
        """
        super().__init__(**kwargs)
        self.service_bus_queues = service_bus_queues
        self.service_bus_topics = service_bus_topics
        self.event_hubs = event_hubs
class RoutingEventHubProperties(_serialization.Model):
    """Properties of an Event Hub routing endpoint.

    All required parameters must be populated in order to send to Azure.

    :ivar connection_string: The connection string of the event hub endpoint. Required.
    :vartype connection_string: str
    :ivar name: The name of the event hub endpoint. The name can only include alphanumeric
     characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
     following names are reserved; events, operationsMonitoringEvents, fileNotifications,
     $default. Endpoint names must be unique across endpoint types. Required.
    :vartype name: str
    :ivar subscription_id: The subscription identifier of the event hub endpoint.
    :vartype subscription_id: str
    :ivar resource_group: The name of the resource group of the event hub endpoint.
    :vartype resource_group: str
    """

    # The name pattern enforces the 1-64 character alphanumeric/period/underscore/hyphen rule.
    _validation = {
        "connection_string": {"required": True},
        "name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
    }

    _attribute_map = {
        "connection_string": {"key": "connectionString", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "subscription_id": {"key": "subscriptionId", "type": "str"},
        "resource_group": {"key": "resourceGroup", "type": "str"},
    }

    def __init__(
        self,
        *,
        connection_string: str,
        name: str,
        subscription_id: Optional[str] = None,
        resource_group: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword connection_string: The connection string of the event hub endpoint. Required.
        :paramtype connection_string: str
        :keyword name: The name of the event hub endpoint. The name can only include alphanumeric
         characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
         following names are reserved; events, operationsMonitoringEvents, fileNotifications,
         $default. Endpoint names must be unique across endpoint types. Required.
        :paramtype name: str
        :keyword subscription_id: The subscription identifier of the event hub endpoint.
        :paramtype subscription_id: str
        :keyword resource_group: The name of the resource group of the event hub endpoint.
        :paramtype resource_group: str
        """
        super().__init__(**kwargs)
        self.connection_string = connection_string
        self.name = name
        self.subscription_id = subscription_id
        self.resource_group = resource_group
class RoutingProperties(_serialization.Model):
    """The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.

    :ivar endpoints: The properties related to the custom endpoints to which your IoT hub
     routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed
     across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all
     endpoint types for free hubs.
    :vartype endpoints: ~azure.mgmt.iothub.v2017_01_19.models.RoutingEndpoints
    :ivar routes: The list of user-provided routing rules that the IoT hub uses to route
     messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for
     paid hubs and a maximum of 5 routing rules are allowed for free hubs.
    :vartype routes: list[~azure.mgmt.iothub.v2017_01_19.models.RouteProperties]
    :ivar fallback_route: The properties of the route that is used as a fall-back route when
     none of the conditions specified in the 'routes' section are met. This is an optional
     parameter. When this property is not set, the messages which do not meet any of the
     conditions specified in the 'routes' section get routed to the built-in eventhub endpoint.
    :vartype fallback_route: ~azure.mgmt.iothub.v2017_01_19.models.FallbackRouteProperties
    """

    _attribute_map = {
        "endpoints": {"key": "endpoints", "type": "RoutingEndpoints"},
        "routes": {"key": "routes", "type": "[RouteProperties]"},
        "fallback_route": {"key": "fallbackRoute", "type": "FallbackRouteProperties"},
    }

    def __init__(
        self,
        *,
        endpoints: Optional["_models.RoutingEndpoints"] = None,
        routes: Optional[List["_models.RouteProperties"]] = None,
        fallback_route: Optional["_models.FallbackRouteProperties"] = None,
        **kwargs
    ):
        """
        :keyword endpoints: The properties related to the custom endpoints to which your IoT hub
         routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed
         across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all
         endpoint types for free hubs.
        :paramtype endpoints: ~azure.mgmt.iothub.v2017_01_19.models.RoutingEndpoints
        :keyword routes: The list of user-provided routing rules that the IoT hub uses to route
         messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for
         paid hubs and a maximum of 5 routing rules are allowed for free hubs.
        :paramtype routes: list[~azure.mgmt.iothub.v2017_01_19.models.RouteProperties]
        :keyword fallback_route: The properties of the route that is used as a fall-back route
         when none of the conditions specified in the 'routes' section are met. This is an
         optional parameter. When this property is not set, the messages which do not meet any of
         the conditions specified in the 'routes' section get routed to the built-in eventhub
         endpoint.
        :paramtype fallback_route: ~azure.mgmt.iothub.v2017_01_19.models.FallbackRouteProperties
        """
        super().__init__(**kwargs)
        self.endpoints = endpoints
        self.routes = routes
        self.fallback_route = fallback_route
class RoutingServiceBusQueueEndpointProperties(_serialization.Model):
    """Properties of a Service Bus queue routing endpoint.

    All required parameters must be populated in order to send to Azure.

    :ivar connection_string: The connection string of the service bus queue endpoint. Required.
    :vartype connection_string: str
    :ivar name: The name of the service bus queue endpoint. The name can only include
     alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
     characters. The following names are reserved; events, operationsMonitoringEvents,
     fileNotifications, $default. Endpoint names must be unique across endpoint types. The name
     need not be the same as the actual queue name. Required.
    :vartype name: str
    :ivar subscription_id: The subscription identifier of the service bus queue endpoint.
    :vartype subscription_id: str
    :ivar resource_group: The name of the resource group of the service bus queue endpoint.
    :vartype resource_group: str
    """

    # The name pattern enforces the 1-64 character alphanumeric/period/underscore/hyphen rule.
    _validation = {
        "connection_string": {"required": True},
        "name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
    }

    _attribute_map = {
        "connection_string": {"key": "connectionString", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "subscription_id": {"key": "subscriptionId", "type": "str"},
        "resource_group": {"key": "resourceGroup", "type": "str"},
    }

    def __init__(
        self,
        *,
        connection_string: str,
        name: str,
        subscription_id: Optional[str] = None,
        resource_group: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword connection_string: The connection string of the service bus queue endpoint.
         Required.
        :paramtype connection_string: str
        :keyword name: The name of the service bus queue endpoint. The name can only include
         alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
         characters. The following names are reserved; events, operationsMonitoringEvents,
         fileNotifications, $default. Endpoint names must be unique across endpoint types. The
         name need not be the same as the actual queue name. Required.
        :paramtype name: str
        :keyword subscription_id: The subscription identifier of the service bus queue endpoint.
        :paramtype subscription_id: str
        :keyword resource_group: The name of the resource group of the service bus queue endpoint.
        :paramtype resource_group: str
        """
        super().__init__(**kwargs)
        self.connection_string = connection_string
        self.name = name
        self.subscription_id = subscription_id
        self.resource_group = resource_group
class RoutingServiceBusTopicEndpointProperties(_serialization.Model):
    """Properties of a Service Bus topic routing endpoint.

    All required parameters must be populated in order to send to Azure.

    :ivar connection_string: The connection string of the service bus topic endpoint. Required.
    :vartype connection_string: str
    :ivar name: The name of the service bus topic endpoint. The name can only include
     alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
     characters. The following names are reserved; events, operationsMonitoringEvents,
     fileNotifications, $default. Endpoint names must be unique across endpoint types. The name
     need not be the same as the actual topic name. Required.
    :vartype name: str
    :ivar subscription_id: The subscription identifier of the service bus topic endpoint.
    :vartype subscription_id: str
    :ivar resource_group: The name of the resource group of the service bus topic endpoint.
    :vartype resource_group: str
    """

    # The name pattern enforces the 1-64 character alphanumeric/period/underscore/hyphen rule.
    _validation = {
        "connection_string": {"required": True},
        "name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
    }

    _attribute_map = {
        "connection_string": {"key": "connectionString", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "subscription_id": {"key": "subscriptionId", "type": "str"},
        "resource_group": {"key": "resourceGroup", "type": "str"},
    }

    def __init__(
        self,
        *,
        connection_string: str,
        name: str,
        subscription_id: Optional[str] = None,
        resource_group: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword connection_string: The connection string of the service bus topic endpoint.
         Required.
        :paramtype connection_string: str
        :keyword name: The name of the service bus topic endpoint. The name can only include
         alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
         characters. The following names are reserved; events, operationsMonitoringEvents,
         fileNotifications, $default. Endpoint names must be unique across endpoint types. The
         name need not be the same as the actual topic name. Required.
        :paramtype name: str
        :keyword subscription_id: The subscription identifier of the service bus topic endpoint.
        :paramtype subscription_id: str
        :keyword resource_group: The name of the resource group of the service bus topic endpoint.
        :paramtype resource_group: str
        """
        super().__init__(**kwargs)
        self.connection_string = connection_string
        self.name = name
        self.subscription_id = subscription_id
        self.resource_group = resource_group
class SharedAccessSignatureAuthorizationRule(_serialization.Model):
    """Properties of an IoT hub shared access policy.

    All required parameters must be populated in order to send to Azure.

    :ivar key_name: The name of the shared access policy. Required.
    :vartype key_name: str
    :ivar primary_key: The primary key.
    :vartype primary_key: str
    :ivar secondary_key: The secondary key.
    :vartype secondary_key: str
    :ivar rights: The permissions assigned to the shared access policy. Required. Known values
     are: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
     RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect",
     "RegistryWrite, ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect,
     DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite,
     DeviceConnect", "RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite,
     ServiceConnect, DeviceConnect", and "RegistryRead, RegistryWrite, ServiceConnect,
     DeviceConnect".
    :vartype rights: str or ~azure.mgmt.iothub.v2017_01_19.models.AccessRights
    """

    # The policy name and its rights are mandatory; the keys are optional inputs.
    _validation = {
        "key_name": {"required": True},
        "rights": {"required": True},
    }

    _attribute_map = {
        "key_name": {"key": "keyName", "type": "str"},
        "primary_key": {"key": "primaryKey", "type": "str"},
        "secondary_key": {"key": "secondaryKey", "type": "str"},
        "rights": {"key": "rights", "type": "str"},
    }

    def __init__(
        self,
        *,
        key_name: str,
        rights: Union[str, "_models.AccessRights"],
        primary_key: Optional[str] = None,
        secondary_key: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword key_name: The name of the shared access policy. Required.
        :paramtype key_name: str
        :keyword primary_key: The primary key.
        :paramtype primary_key: str
        :keyword secondary_key: The secondary key.
        :paramtype secondary_key: str
        :keyword rights: The permissions assigned to the shared access policy. Required. Known
         values are: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect",
         "RegistryRead, RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead,
         DeviceConnect", "RegistryWrite, ServiceConnect", "RegistryWrite, DeviceConnect",
         "ServiceConnect, DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect",
         "RegistryRead, RegistryWrite, DeviceConnect", "RegistryRead, ServiceConnect,
         DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect", and "RegistryRead,
         RegistryWrite, ServiceConnect, DeviceConnect".
        :paramtype rights: str or ~azure.mgmt.iothub.v2017_01_19.models.AccessRights
        """
        super().__init__(**kwargs)
        self.key_name = key_name
        self.primary_key = primary_key
        self.secondary_key = secondary_key
        self.rights = rights
class SharedAccessSignatureAuthorizationRuleListResult(_serialization.Model):
    """A page of shared access policies together with a next link.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The list of shared access policies.
    :vartype value:
     list[~azure.mgmt.iothub.v2017_01_19.models.SharedAccessSignatureAuthorizationRule]
    :ivar next_link: The next link.
    :vartype next_link: str
    """

    # next_link is paging state produced by the service; clients never set it.
    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[SharedAccessSignatureAuthorizationRule]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.SharedAccessSignatureAuthorizationRule"]] = None,
        **kwargs
    ):
        """
        :keyword value: The list of shared access policies.
        :paramtype value:
         list[~azure.mgmt.iothub.v2017_01_19.models.SharedAccessSignatureAuthorizationRule]
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = None
class StorageEndpointProperties(_serialization.Model):
    """Properties of the Azure Storage endpoint used for file upload.

    All required parameters must be populated in order to send to Azure.

    :ivar sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
     file upload is valid. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
    :vartype sas_ttl_as_iso8601: ~datetime.timedelta
    :ivar connection_string: The connection string for the Azure Storage account to which files
     are uploaded. Required.
    :vartype connection_string: str
    :ivar container_name: The name of the root container where you upload files. The container
     need not exist but should be creatable using the connectionString specified. Required.
    :vartype container_name: str
    """

    _validation = {
        "connection_string": {"required": True},
        "container_name": {"required": True},
    }

    # The SAS TTL is serialized as an ISO 8601 duration.
    _attribute_map = {
        "sas_ttl_as_iso8601": {"key": "sasTtlAsIso8601", "type": "duration"},
        "connection_string": {"key": "connectionString", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
    }

    def __init__(
        self,
        *,
        connection_string: str,
        container_name: str,
        sas_ttl_as_iso8601: Optional[datetime.timedelta] = None,
        **kwargs
    ):
        """
        :keyword sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT
         Hub for file upload is valid. See:
         https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
        :paramtype sas_ttl_as_iso8601: ~datetime.timedelta
        :keyword connection_string: The connection string for the Azure Storage account to which
         files are uploaded. Required.
        :paramtype connection_string: str
        :keyword container_name: The name of the root container where you upload files. The
         container need not exist but should be creatable using the connectionString specified.
         Required.
        :paramtype container_name: str
        """
        super().__init__(**kwargs)
        self.sas_ttl_as_iso8601 = sas_ttl_as_iso8601
        self.connection_string = connection_string
        self.container_name = container_name
| {
"content_hash": "00d22b7d26e815435c2e2f80335c77cb",
"timestamp": "",
"source": "github",
"line_count": 1752,
"max_line_length": 283,
"avg_line_length": 43.43550228310502,
"alnum_prop": 0.6537799445459205,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1992b2788ec3ce9182032fdc7de3d668e2863c66",
"size": "76600",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2017_01_19/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django import forms
class ImageUploadForm(forms.Form):
    """Form for uploading an image file together with a name.

    Fields:
        image: the uploaded image file.
        nombre: a text name for the upload ("nombre" is Spanish for "name").
    """
    image = forms.ImageField()
    nombre = forms.CharField()
| {
"content_hash": "1978708fed84c20cd1637db8a484fa90",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 34,
"avg_line_length": 21.857142857142858,
"alnum_prop": 0.6862745098039216,
"repo_name": "roviol/recog",
"id": "b05952e4971a9a382937cbaac4e3a322d18add00",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reconpath/reconloc/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14313"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "isosurface"
_path_str = "isosurface.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        Sets the color of padded area.

        The 'bgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba, hsl/hsla or hsv/hsva string
          - A named CSS color (e.g. 'steelblue')

        Returns
        -------
        str
        """
        return self["bgcolor"]
    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val
    # bordercolor
    # -----------
    @property
    def bordercolor(self):
        """
        Sets the axis line color.

        The 'bordercolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba, hsl/hsla or hsv/hsva string
          - A named CSS color (e.g. 'steelblue')

        Returns
        -------
        str
        """
        return self["bordercolor"]
    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val
    # borderwidth
    # -----------
    @property
    def borderwidth(self):
        """
        Sets the width (in px) of the border enclosing this color bar.

        The 'borderwidth' property is a number and may be specified as
        an int or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["borderwidth"]
    @borderwidth.setter
    def borderwidth(self, val):
        self["borderwidth"] = val
    # dtick
    # -----
    @property
    def dtick(self):
        """
        Sets the step in-between ticks on this axis. Use with `tick0`.
        Must be a positive number, or a special string for "log" and
        "date" axes: for "log", ticks fall at 10^(n*dtick), "L<f>"
        gives linear spacing by `f`, and "D1"/"D2" show intermediate
        digits; for "date", use milliseconds or "M<n>" for `n` months
        (e.g. "M3" for quarterly, "M48" for every 4 years).

        The 'dtick' property accepts values of any type

        Returns
        -------
        Any
        """
        return self["dtick"]
    @dtick.setter
    def dtick(self, val):
        self["dtick"] = val
    # exponentformat
    # --------------
    @property
    def exponentformat(self):
        """
        Determines a formatting rule for the tick exponents. For the
        number 1,000,000,000: "none" shows 1,000,000,000; "e" 1e+9;
        "E" 1E+9; "power" 1x10^9 (9 superscripted); "SI" 1G; "B" 1B.

        The 'exponentformat' property is an enumeration that may be
        specified as one of: ['none', 'e', 'E', 'power', 'SI', 'B']

        Returns
        -------
        Any
        """
        return self["exponentformat"]
    @exponentformat.setter
    def exponentformat(self, val):
        self["exponentformat"] = val
    # len
    # ---
    # NOTE: shadows the builtin len() within the class body only; the
    # name is part of the public plotly API and cannot be changed.
    @property
    def len(self):
        """
        Sets the length of the color bar. This measure excludes the
        padding of both ends; the color bar length is this length
        minus the padding on both ends.

        The 'len' property is a number and may be specified as an int
        or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["len"]
    @len.setter
    def len(self, val):
        self["len"] = val
    # lenmode
    # -------
    @property
    def lenmode(self):
        """
        Determines whether this color bar's length (i.e. the measure in
        the color variation direction) is set in units of plot
        "fraction" or in "pixels". Use `len` to set the value.

        The 'lenmode' property is an enumeration that may be specified
        as one of: ['fraction', 'pixels']

        Returns
        -------
        Any
        """
        return self["lenmode"]
    @lenmode.setter
    def lenmode(self, val):
        self["lenmode"] = val
    # nticks
    # ------
    @property
    def nticks(self):
        """
        Specifies the maximum number of ticks for the particular axis.
        The actual number of ticks will be chosen automatically to be
        less than or equal to `nticks`. Has an effect only if
        `tickmode` is set to "auto".

        The 'nticks' property is an integer (or a float cast to int)
        in the interval [0, 9223372036854775807].

        Returns
        -------
        int
        """
        return self["nticks"]
    @nticks.setter
    def nticks(self, val):
        self["nticks"] = val
    # outlinecolor
    # ------------
    @property
    def outlinecolor(self):
        """
        Sets the axis line color.

        The 'outlinecolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba, hsl/hsla or hsv/hsva string
          - A named CSS color (e.g. 'steelblue')

        Returns
        -------
        str
        """
        return self["outlinecolor"]
    @outlinecolor.setter
    def outlinecolor(self, val):
        self["outlinecolor"] = val
    # outlinewidth
    # ------------
    @property
    def outlinewidth(self):
        """
        Sets the width (in px) of the axis line.

        The 'outlinewidth' property is a number and may be specified as
        an int or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["outlinewidth"]
    @outlinewidth.setter
    def outlinewidth(self, val):
        self["outlinewidth"] = val
    # separatethousands
    # -----------------
    @property
    def separatethousands(self):
        """
        If "true", even 4-digit integers are separated.

        The 'separatethousands' property must be specified as a bool
        (either True, or False).

        Returns
        -------
        bool
        """
        return self["separatethousands"]
    @separatethousands.setter
    def separatethousands(self, val):
        self["separatethousands"] = val
    # showexponent
    # ------------
    @property
    def showexponent(self):
        """
        If "all", all exponents are shown besides their significands.
        If "first", only the exponent of the first tick is shown. If
        "last", only the exponent of the last tick is shown. If
        "none", no exponents appear.

        The 'showexponent' property is an enumeration that may be
        specified as one of: ['all', 'first', 'last', 'none']

        Returns
        -------
        Any
        """
        return self["showexponent"]
    @showexponent.setter
    def showexponent(self, val):
        self["showexponent"] = val
    # showticklabels
    # --------------
    @property
    def showticklabels(self):
        """
        Determines whether or not the tick labels are drawn.

        The 'showticklabels' property must be specified as a bool
        (either True, or False).

        Returns
        -------
        bool
        """
        return self["showticklabels"]
    @showticklabels.setter
    def showticklabels(self, val):
        self["showticklabels"] = val
    # showtickprefix
    # --------------
    @property
    def showtickprefix(self):
        """
        If "all", all tick labels are displayed with a prefix. If
        "first", only the first tick is displayed with a prefix. If
        "last", only the last tick is displayed with a prefix. If
        "none", tick prefixes are hidden.

        The 'showtickprefix' property is an enumeration that may be
        specified as one of: ['all', 'first', 'last', 'none']

        Returns
        -------
        Any
        """
        return self["showtickprefix"]
    @showtickprefix.setter
    def showtickprefix(self, val):
        self["showtickprefix"] = val
    # showticksuffix
    # --------------
    @property
    def showticksuffix(self):
        """
        Same as `showtickprefix` but for tick suffixes.

        The 'showticksuffix' property is an enumeration that may be
        specified as one of: ['all', 'first', 'last', 'none']

        Returns
        -------
        Any
        """
        return self["showticksuffix"]
    @showticksuffix.setter
    def showticksuffix(self, val):
        self["showticksuffix"] = val
    # thickness
    # ---------
    @property
    def thickness(self):
        """
        Sets the thickness of the color bar. This measure excludes the
        size of the padding, ticks and labels.

        The 'thickness' property is a number and may be specified as
        an int or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["thickness"]
    @thickness.setter
    def thickness(self, val):
        self["thickness"] = val
    # thicknessmode
    # -------------
    @property
    def thicknessmode(self):
        """
        Determines whether this color bar's thickness (i.e. the measure
        in the constant color direction) is set in units of plot
        "fraction" or in "pixels". Use `thickness` to set the value.

        The 'thicknessmode' property is an enumeration that may be
        specified as one of: ['fraction', 'pixels']

        Returns
        -------
        Any
        """
        return self["thicknessmode"]
    @thicknessmode.setter
    def thicknessmode(self, val):
        self["thicknessmode"] = val
    # tick0
    # -----
    @property
    def tick0(self):
        """
        Sets the placement of the first tick on this axis. Use with
        `dtick`. If the axis `type` is "log", supply the log of the
        starting tick (e.g. 2 for a tick at 100), except when
        `dtick`="L<f>" (see `dtick`). If the axis `type` is "date",
        supply a date string. If the axis `type` is "category", supply
        the category's serial number (counted from zero in order of
        appearance).

        The 'tick0' property accepts values of any type

        Returns
        -------
        Any
        """
        return self["tick0"]
    @tick0.setter
    def tick0(self, val):
        self["tick0"] = val
    # tickangle
    # ---------
    @property
    def tickangle(self):
        """
        Sets the angle of the tick labels with respect to the
        horizontal. For example, a `tickangle` of -90 draws the tick
        labels vertically.

        The 'tickangle' property is an angle (in degrees) specified as
        a number between -180 and 180; values outside this range are
        converted to the equivalent value (e.g. 270 becomes -90).

        Returns
        -------
        int|float
        """
        return self["tickangle"]
    @tickangle.setter
    def tickangle(self, val):
        self["tickangle"] = val
    # tickcolor
    # ---------
    @property
    def tickcolor(self):
        """
        Sets the tick color.

        The 'tickcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba, hsl/hsla or hsv/hsva string
          - A named CSS color (e.g. 'steelblue')

        Returns
        -------
        str
        """
        return self["tickcolor"]
    @tickcolor.setter
    def tickcolor(self, val):
        self["tickcolor"] = val
    # tickfont
    # --------
    @property
    def tickfont(self):
        """
        Sets the color bar's tick label font.

        The 'tickfont' property is an instance of Tickfont that may be
        specified as:
          - An instance of
            :class:`plotly.graph_objs.isosurface.colorbar.Tickfont`
          - A dict of string/value properties passed to the Tickfont
            constructor (supported keys: color, family, size)

        Returns
        -------
        plotly.graph_objs.isosurface.colorbar.Tickfont
        """
        return self["tickfont"]
    @tickfont.setter
    def tickfont(self, val):
        self["tickfont"] = val
    # tickformat
    # ----------
    @property
    def tickformat(self):
        """
        Sets the tick label formatting rule using d3 formatting
        mini-languages, very similar to Python's. Numbers:
        https://github.com/d3/d3-3.x-api-reference/blob/master/Formatting.md#d3_format
        Dates:
        https://github.com/d3/d3-3.x-api-reference/blob/master/Time-Formatting.md#format
        One extension to d3's date formatter: "%{n}f" renders
        fractional seconds with n digits, e.g. *2016-10-13
        09:15:23.456* with tickformat "%H~%M~%S.%2f" displays
        "09~15~23.46".

        The 'tickformat' property is a string (or a number converted
        to a string).

        Returns
        -------
        str
        """
        return self["tickformat"]
    @tickformat.setter
    def tickformat(self, val):
        self["tickformat"] = val
    # tickformatstops
    # ---------------
    @property
    def tickformatstops(self):
        """
        Per-zoom-level tick format rules.

        The 'tickformatstops' property is a tuple of instances of
        Tickformatstop that may be specified as:
          - A list/tuple of instances of
            plotly.graph_objs.isosurface.colorbar.Tickformatstop
          - A list/tuple of dicts passed to the Tickformatstop
            constructor. Supported keys:
              dtickrange: [*min*, *max*] dtick values describing a
                  zoom level ("min"/"max" may be omitted via "null")
              enabled: if `false` this stop is ignored even within
                  its `dtickrange`
              name: template item name; items with a matching `name`
                  in the figure override this template item
              templateitemname: refers to a named template item to
                  modify; hidden if no match unless `visible: true`
              value: dtickformat for the described zoom level, same
                  syntax as "tickformat"

        Returns
        -------
        tuple[plotly.graph_objs.isosurface.colorbar.Tickformatstop]
        """
        return self["tickformatstops"]
    @tickformatstops.setter
    def tickformatstops(self, val):
        self["tickformatstops"] = val
    # tickformatstopdefaults
    # ----------------------
    @property
    def tickformatstopdefaults(self):
        """
        When used in a template (as layout.template.data.isosurface.co
        lorbar.tickformatstopdefaults), sets the default property
        values to use for elements of
        isosurface.colorbar.tickformatstops.

        The 'tickformatstopdefaults' property is an instance of
        Tickformatstop that may be specified as an instance of
        :class:`plotly.graph_objs.isosurface.colorbar.Tickformatstop`
        or a dict of string/value properties passed to its
        constructor.

        Returns
        -------
        plotly.graph_objs.isosurface.colorbar.Tickformatstop
        """
        return self["tickformatstopdefaults"]
    @tickformatstopdefaults.setter
    def tickformatstopdefaults(self, val):
        self["tickformatstopdefaults"] = val
    # ticklen
    # -------
    @property
    def ticklen(self):
        """
        Sets the tick length (in px).

        The 'ticklen' property is a number and may be specified as an
        int or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["ticklen"]
    @ticklen.setter
    def ticklen(self, val):
        self["ticklen"] = val
    # tickmode
    # --------
    @property
    def tickmode(self):
        """
        Sets the tick mode for this axis. If "auto", the number of
        ticks is set via `nticks`. If "linear", tick placement is
        determined by a starting position `tick0` and a step `dtick`
        ("linear" is the default when `tick0` and `dtick` are
        provided). If "array", tick positions come from `tickvals` and
        labels from `ticktext` ("array" is the default when `tickvals`
        is provided).

        The 'tickmode' property is an enumeration that may be
        specified as one of: ['auto', 'linear', 'array']

        Returns
        -------
        Any
        """
        return self["tickmode"]
    @tickmode.setter
    def tickmode(self, val):
        self["tickmode"] = val
    # tickprefix
    # ----------
    @property
    def tickprefix(self):
        """
        Sets a tick label prefix.

        The 'tickprefix' property is a string (or a number converted
        to a string).

        Returns
        -------
        str
        """
        return self["tickprefix"]
    @tickprefix.setter
    def tickprefix(self, val):
        self["tickprefix"] = val
    # ticks
    # -----
    @property
    def ticks(self):
        """
        Determines whether ticks are drawn or not. If "", this axis'
        ticks are not drawn. If "outside" ("inside"), this axis' ticks
        are drawn outside (inside) the axis lines.

        The 'ticks' property is an enumeration that may be specified
        as one of: ['outside', 'inside', '']

        Returns
        -------
        Any
        """
        return self["ticks"]
    @ticks.setter
    def ticks(self, val):
        self["ticks"] = val
    # ticksuffix
    # ----------
    @property
    def ticksuffix(self):
        """
        Sets a tick label suffix.

        The 'ticksuffix' property is a string (or a number converted
        to a string).

        Returns
        -------
        str
        """
        return self["ticksuffix"]
    @ticksuffix.setter
    def ticksuffix(self, val):
        self["ticksuffix"] = val
    # ticktext
    # --------
    @property
    def ticktext(self):
        """
        Sets the text displayed at the ticks positioned via
        `tickvals`. Only has an effect if `tickmode` is set to
        "array". Used with `tickvals`.

        The 'ticktext' property is an array that may be specified as a
        tuple, list, numpy array, or pandas Series.

        Returns
        -------
        numpy.ndarray
        """
        return self["ticktext"]
    @ticktext.setter
    def ticktext(self, val):
        self["ticktext"] = val
    # ticktextsrc
    # -----------
    @property
    def ticktextsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for ticktext.

        The 'ticktextsrc' property must be specified as a string or as
        a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["ticktextsrc"]
    @ticktextsrc.setter
    def ticktextsrc(self, val):
        self["ticktextsrc"] = val
    # tickvals
    # --------
    @property
    def tickvals(self):
        """
        Sets the values at which ticks on this axis appear. Only has
        an effect if `tickmode` is set to "array". Used with
        `ticktext`.

        The 'tickvals' property is an array that may be specified as a
        tuple, list, numpy array, or pandas Series.

        Returns
        -------
        numpy.ndarray
        """
        return self["tickvals"]
    @tickvals.setter
    def tickvals(self, val):
        self["tickvals"] = val
    # tickvalssrc
    # -----------
    @property
    def tickvalssrc(self):
        """
        Sets the source reference on Chart Studio Cloud for tickvals.

        The 'tickvalssrc' property must be specified as a string or as
        a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["tickvalssrc"]
    @tickvalssrc.setter
    def tickvalssrc(self, val):
        self["tickvalssrc"] = val
    # tickwidth
    # ---------
    @property
    def tickwidth(self):
        """
        Sets the tick width (in px).

        The 'tickwidth' property is a number and may be specified as
        an int or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["tickwidth"]
    @tickwidth.setter
    def tickwidth(self, val):
        self["tickwidth"] = val
    # title
    # -----
    @property
    def title(self):
        """
        The color bar's title.

        The 'title' property is an instance of Title that may be
        specified as:
          - An instance of
            :class:`plotly.graph_objs.isosurface.colorbar.Title`
          - A dict of string/value properties passed to the Title
            constructor. Supported keys:
              font: the title font (formerly the now-deprecated
                  `titlefont` attribute)
              side: location of the title with respect to the color
                  bar (formerly the now-deprecated `titleside`
                  attribute)
              text: the title text (formerly the `title` attribute
                  itself; that usage is deprecated)

        Returns
        -------
        plotly.graph_objs.isosurface.colorbar.Title
        """
        return self["title"]
    @title.setter
    def title(self, val):
        self["title"] = val
    # titlefont
    # ---------
    @property
    def titlefont(self):
        """
        Deprecated: Please use isosurface.colorbar.title.font instead.
        Sets this color bar's title font.

        The 'font' property is an instance of Font that may be
        specified as:
          - An instance of
            :class:`plotly.graph_objs.isosurface.colorbar.title.Font`
          - A dict of string/value properties passed to the Font
            constructor (supported keys: color, family, size)

        Returns
        -------
        """
        return self["titlefont"]
    @titlefont.setter
    def titlefont(self, val):
        self["titlefont"] = val
    # titleside
    # ---------
    @property
    def titleside(self):
        """
        Deprecated: Please use isosurface.colorbar.title.side instead.
        Determines the location of the color bar's title with respect
        to the color bar.

        The 'side' property is an enumeration that may be specified as
        one of: ['right', 'top', 'bottom']

        Returns
        -------
        """
        return self["titleside"]
    @titleside.setter
    def titleside(self, val):
        self["titleside"] = val
    # x
    # -
    @property
    def x(self):
        """
        Sets the x position of the color bar (in plot fraction).

        The 'x' property is a number and may be specified as an int or
        float in the interval [-2, 3].

        Returns
        -------
        int|float
        """
        return self["x"]
    @x.setter
    def x(self, val):
        self["x"] = val
    # xanchor
    # -------
    @property
    def xanchor(self):
        """
        Sets this color bar's horizontal position anchor. This anchor
        binds the `x` position to the "left", "center" or "right" of
        the color bar.

        The 'xanchor' property is an enumeration that may be specified
        as one of: ['left', 'center', 'right']

        Returns
        -------
        Any
        """
        return self["xanchor"]
    @xanchor.setter
    def xanchor(self, val):
        self["xanchor"] = val
    # xpad
    # ----
    @property
    def xpad(self):
        """
        Sets the amount of padding (in px) along the x direction.

        The 'xpad' property is a number and may be specified as an int
        or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["xpad"]
    @xpad.setter
    def xpad(self, val):
        self["xpad"] = val
    # y
    # -
    @property
    def y(self):
        """
        Sets the y position of the color bar (in plot fraction).

        The 'y' property is a number and may be specified as an int or
        float in the interval [-2, 3].

        Returns
        -------
        int|float
        """
        return self["y"]
    @y.setter
    def y(self, val):
        self["y"] = val
    # yanchor
    # -------
    @property
    def yanchor(self):
        """
        Sets this color bar's vertical position anchor. This anchor
        binds the `y` position to the "top", "middle" or "bottom" of
        the color bar.

        The 'yanchor' property is an enumeration that may be specified
        as one of: ['top', 'middle', 'bottom']

        Returns
        -------
        Any
        """
        return self["yanchor"]
    @yanchor.setter
    def yanchor(self, val):
        self["yanchor"] = val
    # ypad
    # ----
    @property
    def ypad(self):
        """
        Sets the amount of padding (in px) along the y direction.

        The 'ypad' property is a number and may be specified as an int
        or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["ypad"]
    @ypad.setter
    def ypad(self, val):
        self["ypad"] = val
# Self properties description
# ---------------------------
    @property
    def _prop_descriptions(self):
        # Human-readable help text for every property of this ColorBar;
        # consumed by the base plotly machinery when building docstrings
        # and error messages.  The string content is runtime data and is
        # reproduced unchanged.
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format And for
            dates see: https://github.com/d3/d3-3.x-api-
            reference/blob/master/Time-Formatting.md#format We add
            one item to d3's date formatter: "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.isosurface.colo
            rbar.Tickformatstop` instances or dicts with compatible
            properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.isosur
            face.colorbar.tickformatstopdefaults), sets the default
            property values to use for elements of
            isosurface.colorbar.tickformatstops
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            ticktext .
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            tickvals .
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.isosurface.colorbar.Title`
            instance or dict with compatible properties
        titlefont
            Deprecated: Please use isosurface.colorbar.title.font
            instead. Sets this color bar's title font. Note that
            the title's font used to be set by the now deprecated
            `titlefont` attribute.
        titleside
            Deprecated: Please use isosurface.colorbar.title.side
            instead. Determines the location of color bar's title
            with respect to the color bar. Note that the title's
            location used to be set by the now deprecated
            `titleside` attribute.
        x
            Sets the x position of the color bar (in plot
            fraction).
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar.
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        y
            Sets the y position of the color bar (in plot
            fraction).
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar.
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        """
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.isosurface.colo
rbar.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.isosur
face.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
isosurface.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.isosurface.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use isosurface.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use isosurface.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| {
"content_hash": "0a32bec82a6f7b277ae44c841bb03b02",
"timestamp": "",
"source": "github",
"line_count": 1940,
"max_line_length": 96,
"avg_line_length": 35.81958762886598,
"alnum_prop": 0.5536623974672614,
"repo_name": "plotly/python-api",
"id": "c7dd67ba652d30c42f7c74ae915651b281bed84e",
"size": "69490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/isosurface/_colorbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
<<<<<<< HEAD
<<<<<<< HEAD
"""Shared OS X support functions."""
import os
import re
import sys
# Public API of this module; everything else is an implementation detail.
__all__ = [
    'compiler_fixup',
    'customize_config_vars',
    'customize_compiler',
    'get_platform_osx',
]

# configuration variables that may contain universal build flags,
# like "-arch" or "-isysroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
                            'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
                            'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
                            'PY_CORE_CFLAGS')

# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')

# prefix added to original configuration variable names when the
# unmodified value is saved by _save_modified_value()
_INITPRE = '_OSX_SUPPORT_INITIAL_'
def _find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
def _read_output(commandstring):
"""Output from successful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
# function is not usable during python bootstrap.
# tempfile is also not available then.
import contextlib
try:
import tempfile
fp = tempfile.NamedTemporaryFile()
except ImportError:
fp = open("/tmp/_osx_support.%s"%(
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
def _find_build_tool(toolname):
    """Find a build tool on current path or using xcrun"""
    # Prefer a tool already on PATH; fall back to asking xcrun, and
    # normalize "not found" to the empty string.
    found = _find_executable(toolname)
    if found:
        return found
    return _read_output("/usr/bin/xcrun -find %s" % (toolname,)) or ''
# Cache for _get_system_version(); None means "not determined yet".
_SYSTEM_VERSION = None
def _get_system_version():
    """Return the OS X system version as a string"""
    # Reading this plist is a documented way to get the system version
    # (see the documentation for the Gestalt Manager).  platform.mac_ver
    # is avoided because of possible bootstrap issues during the build of
    # Python itself (distutils is used to build standard library
    # extensions).
    global _SYSTEM_VERSION

    if _SYSTEM_VERSION is not None:
        return _SYSTEM_VERSION

    _SYSTEM_VERSION = ''
    try:
        f = open('/System/Library/CoreServices/SystemVersion.plist')
    except OSError:
        # We're on a plain darwin box -- fall back to the default
        # behaviour (empty string).
        return _SYSTEM_VERSION

    try:
        contents = f.read()
    finally:
        f.close()

    m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
                  r'<string>(.*?)</string>', contents)
    if m is not None:
        # Keep only the "major.minor" prefix of the version.
        _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
    return _SYSTEM_VERSION
def _remove_original_values(_config_vars):
    """Remove original unmodified values for testing"""
    # This is needed for higher-level cross-platform tests of get_platform.
    # Collect first, then delete, to avoid mutating while iterating.
    saved_keys = [k for k in _config_vars if k.startswith(_INITPRE)]
    for k in saved_keys:
        del _config_vars[k]
def _save_modified_value(_config_vars, cv, newvalue):
    """Save modified and original unmodified value of configuration var"""
    oldvalue = _config_vars.get(cv, '')
    savekey = _INITPRE + cv
    # Preserve the very first original value only; later modifications
    # must not overwrite it.
    if oldvalue != newvalue and savekey not in _config_vars:
        _config_vars[savekey] = oldvalue
    _config_vars[cv] = newvalue
def _supports_universal_builds():
    """Returns True if universal builds are supported on this system"""
    # As an approximation, assume that 10.4 or later implies an Xcode
    # environment that supports universal builds -- in particular the
    # -isysroot and -arch compiler arguments.  This is in support of
    # allowing 10.4 universal builds to run on 10.3.x systems.
    osx_version = _get_system_version()
    if not osx_version:
        return False
    try:
        parsed = tuple(int(i) for i in osx_version.split('.'))
    except ValueError:
        return False
    return parsed >= (10, 4)
def _find_appropriate_compiler(_config_vars):
    """Find appropriate C compiler for extension module builds.

    Returns the (possibly modified) ``_config_vars`` mapping.  When the
    configured compiler is unusable, the compiler-related config vars are
    rewritten to use a working replacement (clang).
    """
    # Issue #13590:
    #    The OSX location for the compiler varies between OSX
    #    (or rather Xcode) releases. With older releases (up-to 10.5)
    #    the compiler is in /usr/bin, with newer releases the compiler
    #    can only be found inside Xcode.app if the "Command Line Tools"
    #    are not installed.
    #
    #    Furthermore, the compiler that can be used varies between
    #    Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
    #    as the compiler, after that 'clang' should be used because
    #    gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
    #    miscompiles Python.

    # skip checks if the compiler was overridden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # The CC config var might contain additional arguments.
    # Ignore them while searching.
    cc = oldcc = _config_vars['CC'].split()[0]
    if not _find_executable(cc):
        # Compiler is not found on the shell search PATH.
        # Now search for clang, first on PATH (if the Command Line
        # Tools have been installed in / or if the user has provided
        # another location via CC).  If not found, try using xcrun
        # to find an uninstalled clang (within a selected Xcode).

        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself (and os.popen is
        # implemented on top of subprocess and is therefore not
        # usable as well)
        cc = _find_build_tool('clang')

    elif os.path.basename(cc).startswith('gcc'):
        # Compiler is GCC, check if it is LLVM-GCC
        data = _read_output("'%s' --version"
                             % (cc.replace("'", "'\"'\"'"),))
        if data and 'llvm-gcc' in data:
            # Found LLVM-GCC, fall back to clang
            cc = _find_build_tool('clang')

    if not cc:
        raise SystemError(
               "Cannot locate working compiler")

    if cc != oldcc:
        # Found a replacement compiler.
        # Modify config vars using new compiler, if not already explicitly
        # overridden by an env variable, preserving additional arguments.
        for cv in _COMPILER_CONFIG_VARS:
            if cv in _config_vars and cv not in os.environ:
                cv_split = _config_vars[cv].split()
                # CXX gets the C++ driver of the same compiler (e.g. clang++).
                cv_split[0] = cc if cv != 'CXX' else cc + '++'
                _save_modified_value(_config_vars, cv, ' '.join(cv_split))

    return _config_vars
def _remove_universal_flags(_config_vars):
    """Remove all universal build arguments from config vars.

    Strips ``-arch ...`` and ``-isysroot ...`` from every configuration
    variable listed in _UNIVERSAL_CONFIG_VARS, saving the original values
    via _save_modified_value().  Returns the mapping.
    """
    for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overridden by env var
        if cv in _config_vars and cv not in os.environ:
            flags = _config_vars[cv]
            # Bug fix: re.ASCII was previously passed positionally, where
            # re.sub interprets it as the `count` argument (max number of
            # substitutions), not as a regex flag.  Pass it by keyword, and
            # use raw strings so '\s'/'\w' are proper regex escapes.
            flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
            flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
            _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _remove_unsupported_archs(_config_vars):
    """Remove any unsupported archs from config vars.

    Probes the configured compiler and, when it cannot target PPC, strips
    the related ``-arch ppc*`` flags from the universal config vars.
    Returns the mapping.
    """
    # Different Xcode releases support different sets for '-arch'
    # flags. In particular, Xcode 4.x no longer supports the
    # PPC architectures.
    #
    # This code automatically removes '-arch ppc' and '-arch ppc64'
    # when these are not supported. That makes it possible to
    # build extensions on OSX 10.7 and later with the prebuilt
    # 32-bit installer on the python.org website.

    # skip checks if the compiler was overridden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # Fix: use raw strings for the regex literals -- '\s' in a plain
    # string literal is an invalid escape sequence (DeprecationWarning,
    # SyntaxWarning in newer Pythons).
    if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself
        status = os.system(
            """echo 'int main{};' | """
            """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
            % (_config_vars['CC'].replace("'", "'\"'\"'"),))
        if status:
            # The compile failed for some reason.  Because of differences
            # across Xcode and compiler versions, there is no reliable way
            # to be sure why it failed.  Assume here it was due to lack of
            # PPC support and remove the related '-arch' flags from each
            # config variables not explicitly overridden by an environment
            # variable.  If the error was for some other reason, we hope the
            # failure will show up again when trying to compile an extension
            # module.
            for cv in _UNIVERSAL_CONFIG_VARS:
                if cv in _config_vars and cv not in os.environ:
                    flags = _config_vars[cv]
                    flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
                    _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _override_all_archs(_config_vars):
    """Allow override of all archs with ARCHFLAGS env var.

    Replaces every existing ``-arch ...`` flag in the universal config
    vars with the contents of the ARCHFLAGS environment variable, saving
    the original values.  Returns the mapping.
    """
    # NOTE: This name was introduced by Apple in OSX 10.5 and
    # is used by several scripting languages distributed with
    # that OS release.
    if 'ARCHFLAGS' in os.environ:
        arch = os.environ['ARCHFLAGS']
        for cv in _UNIVERSAL_CONFIG_VARS:
            if cv in _config_vars and '-arch' in _config_vars[cv]:
                flags = _config_vars[cv]
                # Fix: raw string -- '\s' in a plain literal is an invalid
                # escape sequence (SyntaxWarning in newer Pythons).
                flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                flags = flags + ' ' + arch
                _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _check_for_unavailable_sdk(_config_vars):
    """Remove references to any SDKs not available"""
    # If we're on OSX 10.5 or later and the user tries to
    # compile an extension using an SDK that is not present
    # on the current machine it is better to not use an SDK
    # than to fail.  This is particularly important with
    # the standalone Command Line Tools alternative to a
    # full-blown Xcode install since the CLT packages do not
    # provide SDKs.  If the SDK is not present, it is assumed
    # that the header files and dev libs have been installed
    # to /usr and /System/Library by either a standalone CLT
    # package or the CLT component within Xcode.
    cflags = _config_vars.get('CFLAGS', '')
    m = re.search(r'-isysroot\s+(\S+)', cflags)
    if m is None:
        return _config_vars

    sdk = m.group(1)
    if os.path.exists(sdk):
        return _config_vars

    # SDK path is configured but missing: drop it everywhere.
    for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overridden by env var
        if cv in _config_vars and cv not in os.environ:
            flags = _config_vars[cv]
            flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
            _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def compiler_fixup(compiler_so, cc_args):
    """
    This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one them in extra_compile_flags.

    This is needed because '-arch ARCH' adds another architecture to the
    build, without a way to remove an architecture. Furthermore GCC will
    barf if multiple '-isysroot' arguments are present.

    Returns a new argument list; *compiler_so* itself is not mutated.
    """
    def _strip_flag_and_arg(args, flag):
        # Remove every 'flag VALUE' pair from args, in place.
        # (Deduplicates the two identical while/try loops of the original.)
        while True:
            try:
                index = args.index(flag)
            except ValueError:
                return
            del args[index:index+2]

    stripArch = stripSysroot = False

    compiler_so = list(compiler_so)
    if not _supports_universal_builds():
        # OSX before 10.4.0, these don't support -arch and -isysroot at
        # all.
        stripArch = stripSysroot = True
    else:
        stripArch = '-arch' in cc_args
        stripSysroot = '-isysroot' in cc_args

    if stripArch or 'ARCHFLAGS' in os.environ:
        _strip_flag_and_arg(compiler_so, '-arch')

    if 'ARCHFLAGS' in os.environ and not stripArch:
        # User specified different -arch flags in the environ,
        # see also distutils.sysconfig
        compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()

    if stripSysroot:
        _strip_flag_and_arg(compiler_so, '-isysroot')

    # Check if the SDK that is used during compilation actually exists,
    # the universal build requires the usage of a universal SDK and not all
    # users have that installed by default.
    sysroot = None
    if '-isysroot' in cc_args:
        idx = cc_args.index('-isysroot')
        sysroot = cc_args[idx+1]
    elif '-isysroot' in compiler_so:
        idx = compiler_so.index('-isysroot')
        sysroot = compiler_so[idx+1]

    if sysroot and not os.path.isdir(sysroot):
        from distutils import log
        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
                 sysroot)
        log.warn("Please check your Xcode installation")

    return compiler_so
def customize_config_vars(_config_vars):
    """Customize Python build configuration variables.

    Called internally from sysconfig with a mutable mapping of
    name/value pairs parsed from the makefile used to build this
    interpreter; returns the mapping, updated to reflect the running
    environment (which, for a binary installer, may differ from the
    build environment in OS level, build tools and CPU architectures).

    This runs the first time distutils.sysconfig.get_config_vars() is
    called and must work even when no compiler is present (pure-Python
    installs); compiler-path fixes and unavailable-arch detection are
    deferred to customize_compiler().  Currently called from
    distutils.sysconfig.
    """
    # Pre-10.4 toolchains cannot handle -arch/-isysroot at all, so any
    # universal-build flags inherited from the build machine must go.
    # (This matters when building extensions on a 10.3 system with a
    # universal build of Python.)
    if not _supports_universal_builds():
        _remove_universal_flags(_config_vars)

    # Honour an ARCHFLAGS environment override of all archs.
    _override_all_archs(_config_vars)

    # Drop -isysroot references to SDKs that are not installed here.
    _check_for_unavailable_sdk(_config_vars)

    return _config_vars
def customize_compiler(_config_vars):
    """Customize compiler path and configuration variables.

    This customization is performed when the first extension module
    build is requested (in distutils.sysconfig.customize_compiler).
    Returns the mapping, updated in place.
    """
    _find_appropriate_compiler(_config_vars)   # pick a usable CC/CXX
    _remove_unsupported_archs(_config_vars)    # drop ppc flags if unusable
    _override_all_archs(_config_vars)          # honour ARCHFLAGS override
    return _config_vars
def get_platform_osx(_config_vars, osname, release, machine):
    """Filter values for get_platform()

    Returns a (osname, release, machine) triple adjusted for macOS:
    release becomes the deployment target and machine reflects the
    set of '-arch' flags the interpreter was built with.
    Raises ValueError for an unrecognised arch combination.
    """
    # called from get_platform() in sysconfig and distutils.util
    #
    # For our purposes, we'll assume that the system version from
    # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
    # to. This makes the compatibility story a bit more sane because the
    # machine is going to compile and link as if it were
    # MACOSX_DEPLOYMENT_TARGET.
    macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
    macrelease = _get_system_version() or macver
    macver = macver or macrelease

    if macver:
        release = macver
        osname = "macosx"

        # Use the original CFLAGS value, if available, so that we
        # return the same machine type for the platform string.
        # Otherwise, distutils may consider this a cross-compiling
        # case and disallow installs.
        cflags = _config_vars.get(_INITPRE+'CFLAGS',
                                  _config_vars.get('CFLAGS', ''))
        if macrelease:
            try:
                macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
            except ValueError:
                macrelease = (10, 0)
        else:
            # assume no universal support
            macrelease = (10, 0)

        if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
            # The universal build will build fat binaries, but not on
            # systems before 10.4
            machine = 'fat'

            # Raw string: '\s'/'\S' in a plain literal are invalid
            # escape sequences (SyntaxWarning on modern Python).
            archs = re.findall(r'-arch\s+(\S+)', cflags)
            archs = tuple(sorted(set(archs)))

            if len(archs) == 1:
                machine = archs[0]
            elif archs == ('i386', 'ppc'):
                machine = 'fat'
            elif archs == ('i386', 'x86_64'):
                machine = 'intel'
            elif archs == ('i386', 'ppc', 'x86_64'):
                machine = 'fat3'
            elif archs == ('ppc64', 'x86_64'):
                machine = 'fat64'
            elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                machine = 'universal'
            else:
                raise ValueError(
                   "Don't know machine value for archs=%r" % (archs,))

        elif machine == 'i386':
            # On OSX the machine type returned by uname is always the
            # 32-bit variant, even if the executable architecture is
            # the 64-bit variant
            if sys.maxsize >= 2**32:
                machine = 'x86_64'

        elif machine in ('PowerPC', 'Power_Macintosh'):
            # Pick a sane name for the PPC architecture.
            # See 'i386' case
            if sys.maxsize >= 2**32:
                machine = 'ppc64'
            else:
                machine = 'ppc'

    return (osname, release, machine)
# NOTE(review): unresolved merge-conflict separator ('=======') was left here;
# everything below is a duplicate copy of this module. Resolve the conflict
# and keep exactly one copy.
"""Shared OS X support functions."""
import os
import re
import sys
# Public names re-exported to sysconfig / distutils.sysconfig.
__all__ = [
    'compiler_fixup',
    'customize_config_vars',
    'customize_compiler',
    'get_platform_osx',
]

# configuration variables that may contain universal build flags,
# like "-arch" or "-isdkroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
                          'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
                          'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
                          'PY_CORE_CFLAGS')

# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')

# prefix added to original configuration variable names; the pristine
# value is saved under _INITPRE + name by _save_modified_value()
_INITPRE = '_OSX_SUPPORT_INITIAL_'
def _find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
def _read_output(commandstring):
"""Output from successful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
# function is not usable during python bootstrap.
# tempfile is also not available then.
import contextlib
try:
import tempfile
fp = tempfile.NamedTemporaryFile()
except ImportError:
fp = open("/tmp/_osx_support.%s"%(
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
def _find_build_tool(toolname):
    """Find a build tool on current path or using xcrun"""
    # PATH lookup first; fall back to xcrun; '' (falsy) when neither works.
    found = _find_executable(toolname)
    if found:
        return found
    return _read_output("/usr/bin/xcrun -find %s" % (toolname,)) or ''
_SYSTEM_VERSION = None
def _get_system_version():
"""Return the OS X system version as a string"""
# Reading this plist is a documented way to get the system
# version (see the documentation for the Gestalt Manager)
# We avoid using platform.mac_ver to avoid possible bootstrap issues during
# the build of Python itself (distutils is used to build standard library
# extensions).
global _SYSTEM_VERSION
if _SYSTEM_VERSION is None:
_SYSTEM_VERSION = ''
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except OSError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
_SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
return _SYSTEM_VERSION
def _remove_original_values(_config_vars):
    """Remove original unmodified values for testing"""
    # This is needed for higher-level cross-platform tests of get_platform.
    saved = [k for k in _config_vars if k.startswith(_INITPRE)]
    for k in saved:
        del _config_vars[k]
def _save_modified_value(_config_vars, cv, newvalue):
    """Save modified and original unmodified value of configuration var"""
    oldvalue = _config_vars.get(cv, '')
    save_key = _INITPRE + cv
    # Stash only the first-seen original so later edits don't clobber it.
    if oldvalue != newvalue and save_key not in _config_vars:
        _config_vars[save_key] = oldvalue
    _config_vars[cv] = newvalue
def _supports_universal_builds():
    """Returns True if universal builds are supported on this system"""
    # Approximation: running on 10.4 or later implies an Xcode that
    # understands -isysroot and -arch; this also allows 10.4 universal
    # builds to run on 10.3.x systems.
    osx_version = _get_system_version()
    if not osx_version:
        return False
    try:
        parsed = tuple(int(i) for i in osx_version.split('.'))
    except ValueError:
        return False
    return parsed >= (10, 4)
def _find_appropriate_compiler(_config_vars):
    """Find appropriate C compiler for extension module builds

    Rewrites compiler-related config vars (via _save_modified_value)
    when the configured CC is missing or is an llvm-gcc known to
    miscompile Python; returns the (possibly modified) mapping.
    Raises SystemError when no working compiler can be located.
    """
    # Issue #13590:
    #    The OSX location for the compiler varies between OSX
    #    (or rather Xcode) releases. With older releases (up-to 10.5)
    #    the compiler is in /usr/bin, with newer releases the compiler
    #    can only be found inside Xcode.app if the "Command Line Tools"
    #    are not installed.
    #
    #    Futhermore, the compiler that can be used varies between
    #    Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
    #    as the compiler, after that 'clang' should be used because
    #    gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
    #    miscompiles Python.

    # skip checks if the compiler was overriden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # The CC config var might contain additional arguments.
    # Ignore them while searching.
    cc = oldcc = _config_vars['CC'].split()[0]
    if not _find_executable(cc):
        # Compiler is not found on the shell search PATH.
        # Now search for clang, first on PATH (if the Command LIne
        # Tools have been installed in / or if the user has provided
        # another location via CC).  If not found, try using xcrun
        # to find an uninstalled clang (within a selected Xcode).

        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself (and os.popen is
        # implemented on top of subprocess and is therefore not
        # usable as well)
        cc = _find_build_tool('clang')

    elif os.path.basename(cc).startswith('gcc'):
        # Compiler is GCC, check if it is LLVM-GCC
        # (the single-quote escaping guards against quotes in the path)
        data = _read_output("'%s' --version"
                             % (cc.replace("'", "'\"'\"'"),))
        if data and 'llvm-gcc' in data:
            # Found LLVM-GCC, fall back to clang
            cc = _find_build_tool('clang')

    if not cc:
        raise SystemError(
               "Cannot locate working compiler")

    if cc != oldcc:
        # Found a replacement compiler.
        # Modify config vars using new compiler, if not already explicitly
        # overriden by an env variable, preserving additional arguments.
        for cv in _COMPILER_CONFIG_VARS:
            if cv in _config_vars and cv not in os.environ:
                cv_split = _config_vars[cv].split()
                cv_split[0] = cc if cv != 'CXX' else cc + '++'
                _save_modified_value(_config_vars, cv, ' '.join(cv_split))

    return _config_vars
def _remove_universal_flags(_config_vars):
    """Remove all universal build arguments from config vars.

    Mutates *_config_vars* in place (saving pristine values via
    _save_modified_value) and returns it.
    """
    for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overriden by env var
        if cv in _config_vars and cv not in os.environ:
            flags = _config_vars[cv]
            # BUG FIX: the original passed re.ASCII positionally, which
            # re.sub takes as its 'count' argument — the flag was never
            # applied and substitutions were silently capped at 256.
            # Pass it as a keyword, and use raw strings for the escapes.
            flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
            flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
            _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _remove_unsupported_archs(_config_vars):
    """Remove any unsupported archs from config vars.

    Mutates *_config_vars* in place and returns it; no changes are made
    when the user exported CC.
    """
    # Different Xcode releases support different sets for '-arch'
    # flags. In particular, Xcode 4.x no longer supports the
    # PPC architectures.
    #
    # This code automatically removes '-arch ppc' and '-arch ppc64'
    # when these are not supported. That makes it possible to
    # build extensions on OSX 10.7 and later with the prebuilt
    # 32-bit installer on the python.org website.

    # skip checks if the compiler was overriden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # Raw strings below: '\s'/'\w' in plain literals are invalid escape
    # sequences (SyntaxWarning on modern Python).
    if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself
        status = os.system(
            """echo 'int main{};' | """
            """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
            %(_config_vars['CC'].replace("'", "'\"'\"'"),))
        if status:
            # The compile failed for some reason.  Because of differences
            # across Xcode and compiler versions, there is no reliable way
            # to be sure why it failed.  Assume here it was due to lack of
            # PPC support and remove the related '-arch' flags from each
            # config variables not explicitly overriden by an environment
            # variable.  If the error was for some other reason, we hope the
            # failure will show up again when trying to compile an extension
            # module.
            for cv in _UNIVERSAL_CONFIG_VARS:
                if cv in _config_vars and cv not in os.environ:
                    flags = _config_vars[cv]
                    flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
                    _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _override_all_archs(_config_vars):
    """Allow override of all archs with ARCHFLAGS env var.

    For every config var listed in _UNIVERSAL_CONFIG_VARS that currently
    contains '-arch' flags, strip those flags and append the user's
    ARCHFLAGS instead.  Mutates *_config_vars* in place (saving pristine
    values via _save_modified_value) and returns it.
    """
    # NOTE: This name was introduced by Apple in OSX 10.5 and
    # is used by several scripting languages distributed with
    # that OS release.
    if 'ARCHFLAGS' in os.environ:
        arch = os.environ['ARCHFLAGS']
        for cv in _UNIVERSAL_CONFIG_VARS:
            if cv in _config_vars and '-arch' in _config_vars[cv]:
                flags = _config_vars[cv]
                # Raw string: '\s'/'\w' in a plain literal are invalid
                # escape sequences (SyntaxWarning on modern Python).
                flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                flags = flags + ' ' + arch
                _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _check_for_unavailable_sdk(_config_vars):
    """Remove references to any SDKs not available"""
    # If we're on OSX 10.5 or later and the user tries to
    # compile an extension using an SDK that is not present
    # on the current machine it is better to not use an SDK
    # than to fail.  This is particularly important with
    # the standalone Command Line Tools alternative to a
    # full-blown Xcode install since the CLT packages do not
    # provide SDKs.  If the SDK is not present, it is assumed
    # that the header files and dev libs have been installed
    # to /usr and /System/Library by either a standalone CLT
    # package or the CLT component within Xcode.
    match = re.search(r'-isysroot\s+(\S+)', _config_vars.get('CFLAGS', ''))
    if match is None:
        return _config_vars

    sdk_path = match.group(1)
    if os.path.exists(sdk_path):
        return _config_vars

    for name in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overriden by env var
        if name not in _config_vars or name in os.environ:
            continue
        cleaned = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', _config_vars[name])
        _save_modified_value(_config_vars, name, cleaned)

    return _config_vars
def compiler_fixup(compiler_so, cc_args):
    """
    This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one them in extra_compile_flags.

    This is needed because '-arch ARCH' adds another architecture to the
    build, without a way to remove an architecture. Furthermore GCC will
    barf if multiple '-isysroot' arguments are present.

    Returns a new argument list; *compiler_so* itself is not mutated.
    """
    def _strip_flag_and_arg(args, flag):
        # Remove every 'flag VALUE' pair from args, in place.
        # (Deduplicates the two identical while/try loops of the original.)
        while True:
            try:
                index = args.index(flag)
            except ValueError:
                return
            del args[index:index+2]

    stripArch = stripSysroot = False

    compiler_so = list(compiler_so)
    if not _supports_universal_builds():
        # OSX before 10.4.0, these don't support -arch and -isysroot at
        # all.
        stripArch = stripSysroot = True
    else:
        stripArch = '-arch' in cc_args
        stripSysroot = '-isysroot' in cc_args

    if stripArch or 'ARCHFLAGS' in os.environ:
        _strip_flag_and_arg(compiler_so, '-arch')

    if 'ARCHFLAGS' in os.environ and not stripArch:
        # User specified different -arch flags in the environ,
        # see also distutils.sysconfig
        compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()

    if stripSysroot:
        _strip_flag_and_arg(compiler_so, '-isysroot')

    # Check if the SDK that is used during compilation actually exists,
    # the universal build requires the usage of a universal SDK and not all
    # users have that installed by default.
    sysroot = None
    if '-isysroot' in cc_args:
        idx = cc_args.index('-isysroot')
        sysroot = cc_args[idx+1]
    elif '-isysroot' in compiler_so:
        idx = compiler_so.index('-isysroot')
        sysroot = compiler_so[idx+1]

    if sysroot and not os.path.isdir(sysroot):
        from distutils import log
        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
                 sysroot)
        log.warn("Please check your Xcode installation")

    return compiler_so
def customize_config_vars(_config_vars):
    """Customize Python build configuration variables.

    Called internally from sysconfig with a mutable mapping of
    name/value pairs parsed from the makefile used to build this
    interpreter; returns the mapping, updated to reflect the running
    environment (which, for a binary installer, may differ from the
    build environment in OS level, build tools and CPU architectures).

    This runs the first time distutils.sysconfig.get_config_vars() is
    called and must work even when no compiler is present (pure-Python
    installs); compiler-path fixes and unavailable-arch detection are
    deferred to customize_compiler().  Currently called from
    distutils.sysconfig.
    """
    # Pre-10.4 toolchains cannot handle -arch/-isysroot at all, so any
    # universal-build flags inherited from the build machine must go.
    # (This matters when building extensions on a 10.3 system with a
    # universal build of Python.)
    if not _supports_universal_builds():
        _remove_universal_flags(_config_vars)

    # Honour an ARCHFLAGS environment override of all archs.
    _override_all_archs(_config_vars)

    # Drop -isysroot references to SDKs that are not installed here.
    _check_for_unavailable_sdk(_config_vars)

    return _config_vars
def customize_compiler(_config_vars):
    """Customize compiler path and configuration variables.

    This customization is performed when the first extension module
    build is requested (in distutils.sysconfig.customize_compiler).
    Returns the mapping, updated in place.
    """
    _find_appropriate_compiler(_config_vars)   # pick a usable CC/CXX
    _remove_unsupported_archs(_config_vars)    # drop ppc flags if unusable
    _override_all_archs(_config_vars)          # honour ARCHFLAGS override
    return _config_vars
def get_platform_osx(_config_vars, osname, release, machine):
    """Filter values for get_platform()

    Returns a (osname, release, machine) triple adjusted for macOS:
    release becomes the deployment target and machine reflects the
    set of '-arch' flags the interpreter was built with.
    Raises ValueError for an unrecognised arch combination.
    """
    # called from get_platform() in sysconfig and distutils.util
    #
    # For our purposes, we'll assume that the system version from
    # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
    # to. This makes the compatibility story a bit more sane because the
    # machine is going to compile and link as if it were
    # MACOSX_DEPLOYMENT_TARGET.
    macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
    macrelease = _get_system_version() or macver
    macver = macver or macrelease

    if macver:
        release = macver
        osname = "macosx"

        # Use the original CFLAGS value, if available, so that we
        # return the same machine type for the platform string.
        # Otherwise, distutils may consider this a cross-compiling
        # case and disallow installs.
        cflags = _config_vars.get(_INITPRE+'CFLAGS',
                                  _config_vars.get('CFLAGS', ''))
        if macrelease:
            try:
                macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
            except ValueError:
                macrelease = (10, 0)
        else:
            # assume no universal support
            macrelease = (10, 0)

        if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
            # The universal build will build fat binaries, but not on
            # systems before 10.4
            machine = 'fat'

            # Raw string: '\s'/'\S' in a plain literal are invalid
            # escape sequences (SyntaxWarning on modern Python).
            archs = re.findall(r'-arch\s+(\S+)', cflags)
            archs = tuple(sorted(set(archs)))

            if len(archs) == 1:
                machine = archs[0]
            elif archs == ('i386', 'ppc'):
                machine = 'fat'
            elif archs == ('i386', 'x86_64'):
                machine = 'intel'
            elif archs == ('i386', 'ppc', 'x86_64'):
                machine = 'fat3'
            elif archs == ('ppc64', 'x86_64'):
                machine = 'fat64'
            elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                machine = 'universal'
            else:
                raise ValueError(
                   "Don't know machine value for archs=%r" % (archs,))

        elif machine == 'i386':
            # On OSX the machine type returned by uname is always the
            # 32-bit variant, even if the executable architecture is
            # the 64-bit variant
            if sys.maxsize >= 2**32:
                machine = 'x86_64'

        elif machine in ('PowerPC', 'Power_Macintosh'):
            # Pick a sane name for the PPC architecture.
            # See 'i386' case
            if sys.maxsize >= 2**32:
                machine = 'ppc64'
            else:
                machine = 'ppc'

    return (osname, release, machine)
# NOTE(review): unresolved merge-conflict markers were left here
# ('>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453' followed by '=======');
# a third duplicate copy of this module follows. Resolve the conflict and
# keep exactly one copy.
"""Shared OS X support functions."""
import os
import re
import sys
# Public names re-exported to sysconfig / distutils.sysconfig.
__all__ = [
    'compiler_fixup',
    'customize_config_vars',
    'customize_compiler',
    'get_platform_osx',
]

# configuration variables that may contain universal build flags,
# like "-arch" or "-isdkroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
                          'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
                          'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
                          'PY_CORE_CFLAGS')

# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')

# prefix added to original configuration variable names; the pristine
# value is saved under _INITPRE + name by _save_modified_value()
_INITPRE = '_OSX_SUPPORT_INITIAL_'
def _find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
def _read_output(commandstring):
"""Output from successful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
# function is not usable during python bootstrap.
# tempfile is also not available then.
import contextlib
try:
import tempfile
fp = tempfile.NamedTemporaryFile()
except ImportError:
fp = open("/tmp/_osx_support.%s"%(
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
def _find_build_tool(toolname):
    """Find a build tool on current path or using xcrun"""
    # PATH lookup first; fall back to xcrun; '' (falsy) when neither works.
    found = _find_executable(toolname)
    if found:
        return found
    return _read_output("/usr/bin/xcrun -find %s" % (toolname,)) or ''
_SYSTEM_VERSION = None
def _get_system_version():
"""Return the OS X system version as a string"""
# Reading this plist is a documented way to get the system
# version (see the documentation for the Gestalt Manager)
# We avoid using platform.mac_ver to avoid possible bootstrap issues during
# the build of Python itself (distutils is used to build standard library
# extensions).
global _SYSTEM_VERSION
if _SYSTEM_VERSION is None:
_SYSTEM_VERSION = ''
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except OSError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
_SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
return _SYSTEM_VERSION
def _remove_original_values(_config_vars):
    """Remove original unmodified values for testing"""
    # This is needed for higher-level cross-platform tests of get_platform.
    saved = [k for k in _config_vars if k.startswith(_INITPRE)]
    for k in saved:
        del _config_vars[k]
def _save_modified_value(_config_vars, cv, newvalue):
    """Save modified and original unmodified value of configuration var"""
    oldvalue = _config_vars.get(cv, '')
    save_key = _INITPRE + cv
    # Stash only the first-seen original so later edits don't clobber it.
    if oldvalue != newvalue and save_key not in _config_vars:
        _config_vars[save_key] = oldvalue
    _config_vars[cv] = newvalue
def _supports_universal_builds():
    """Returns True if universal builds are supported on this system"""
    # Approximation: running on 10.4 or later implies an Xcode that
    # understands -isysroot and -arch; this also allows 10.4 universal
    # builds to run on 10.3.x systems.
    osx_version = _get_system_version()
    if not osx_version:
        return False
    try:
        parsed = tuple(int(i) for i in osx_version.split('.'))
    except ValueError:
        return False
    return parsed >= (10, 4)
def _find_appropriate_compiler(_config_vars):
    """Find appropriate C compiler for extension module builds

    Rewrites compiler-related config vars (via _save_modified_value)
    when the configured CC is missing or is an llvm-gcc known to
    miscompile Python; returns the (possibly modified) mapping.
    Raises SystemError when no working compiler can be located.
    """
    # Issue #13590:
    #    The OSX location for the compiler varies between OSX
    #    (or rather Xcode) releases. With older releases (up-to 10.5)
    #    the compiler is in /usr/bin, with newer releases the compiler
    #    can only be found inside Xcode.app if the "Command Line Tools"
    #    are not installed.
    #
    #    Futhermore, the compiler that can be used varies between
    #    Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
    #    as the compiler, after that 'clang' should be used because
    #    gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
    #    miscompiles Python.

    # skip checks if the compiler was overriden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # The CC config var might contain additional arguments.
    # Ignore them while searching.
    cc = oldcc = _config_vars['CC'].split()[0]
    if not _find_executable(cc):
        # Compiler is not found on the shell search PATH.
        # Now search for clang, first on PATH (if the Command LIne
        # Tools have been installed in / or if the user has provided
        # another location via CC).  If not found, try using xcrun
        # to find an uninstalled clang (within a selected Xcode).

        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself (and os.popen is
        # implemented on top of subprocess and is therefore not
        # usable as well)
        cc = _find_build_tool('clang')

    elif os.path.basename(cc).startswith('gcc'):
        # Compiler is GCC, check if it is LLVM-GCC
        # (the single-quote escaping guards against quotes in the path)
        data = _read_output("'%s' --version"
                             % (cc.replace("'", "'\"'\"'"),))
        if data and 'llvm-gcc' in data:
            # Found LLVM-GCC, fall back to clang
            cc = _find_build_tool('clang')

    if not cc:
        raise SystemError(
               "Cannot locate working compiler")

    if cc != oldcc:
        # Found a replacement compiler.
        # Modify config vars using new compiler, if not already explicitly
        # overriden by an env variable, preserving additional arguments.
        for cv in _COMPILER_CONFIG_VARS:
            if cv in _config_vars and cv not in os.environ:
                cv_split = _config_vars[cv].split()
                cv_split[0] = cc if cv != 'CXX' else cc + '++'
                _save_modified_value(_config_vars, cv, ' '.join(cv_split))

    return _config_vars
def _remove_universal_flags(_config_vars):
    """Remove all universal build arguments from config vars.

    Mutates *_config_vars* in place (saving pristine values via
    _save_modified_value) and returns it.
    """
    for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overriden by env var
        if cv in _config_vars and cv not in os.environ:
            flags = _config_vars[cv]
            # BUG FIX: the original passed re.ASCII positionally, which
            # re.sub takes as its 'count' argument — the flag was never
            # applied and substitutions were silently capped at 256.
            # Pass it as a keyword, and use raw strings for the escapes.
            flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
            flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
            _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _remove_unsupported_archs(_config_vars):
    """Remove any unsupported archs from config vars.

    Mutates *_config_vars* in place and returns it; no changes are made
    when the user exported CC.
    """
    # Different Xcode releases support different sets for '-arch'
    # flags. In particular, Xcode 4.x no longer supports the
    # PPC architectures.
    #
    # This code automatically removes '-arch ppc' and '-arch ppc64'
    # when these are not supported. That makes it possible to
    # build extensions on OSX 10.7 and later with the prebuilt
    # 32-bit installer on the python.org website.

    # skip checks if the compiler was overriden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # Raw strings below: '\s'/'\w' in plain literals are invalid escape
    # sequences (SyntaxWarning on modern Python).
    if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself
        status = os.system(
            """echo 'int main{};' | """
            """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
            %(_config_vars['CC'].replace("'", "'\"'\"'"),))
        if status:
            # The compile failed for some reason.  Because of differences
            # across Xcode and compiler versions, there is no reliable way
            # to be sure why it failed.  Assume here it was due to lack of
            # PPC support and remove the related '-arch' flags from each
            # config variables not explicitly overriden by an environment
            # variable.  If the error was for some other reason, we hope the
            # failure will show up again when trying to compile an extension
            # module.
            for cv in _UNIVERSAL_CONFIG_VARS:
                if cv in _config_vars and cv not in os.environ:
                    flags = _config_vars[cv]
                    flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
                    _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _override_all_archs(_config_vars):
    """Allow override of all archs with ARCHFLAGS env var.

    For every config var listed in _UNIVERSAL_CONFIG_VARS that currently
    contains '-arch' flags, strip those flags and append the user's
    ARCHFLAGS instead.  Mutates *_config_vars* in place (saving pristine
    values via _save_modified_value) and returns it.
    """
    # NOTE: This name was introduced by Apple in OSX 10.5 and
    # is used by several scripting languages distributed with
    # that OS release.
    if 'ARCHFLAGS' in os.environ:
        arch = os.environ['ARCHFLAGS']
        for cv in _UNIVERSAL_CONFIG_VARS:
            if cv in _config_vars and '-arch' in _config_vars[cv]:
                flags = _config_vars[cv]
                # Raw string: '\s'/'\w' in a plain literal are invalid
                # escape sequences (SyntaxWarning on modern Python).
                flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                flags = flags + ' ' + arch
                _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _check_for_unavailable_sdk(_config_vars):
    """Remove references to any SDKs not available"""
    # If we're on OSX 10.5 or later and the user tries to
    # compile an extension using an SDK that is not present
    # on the current machine it is better to not use an SDK
    # than to fail.  This is particularly important with
    # the standalone Command Line Tools alternative to a
    # full-blown Xcode install since the CLT packages do not
    # provide SDKs.  If the SDK is not present, it is assumed
    # that the header files and dev libs have been installed
    # to /usr and /System/Library by either a standalone CLT
    # package or the CLT component within Xcode.
    match = re.search(r'-isysroot\s+(\S+)', _config_vars.get('CFLAGS', ''))
    if match is None:
        return _config_vars

    sdk_path = match.group(1)
    if os.path.exists(sdk_path):
        return _config_vars

    for name in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overriden by env var
        if name not in _config_vars or name in os.environ:
            continue
        cleaned = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', _config_vars[name])
        _save_modified_value(_config_vars, name, cleaned)

    return _config_vars
def compiler_fixup(compiler_so, cc_args):
    """
    This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one them in extra_compile_flags.

    This is needed because '-arch ARCH' adds another architecture to the
    build, without a way to remove an architecture. Furthermore GCC will
    barf if multiple '-isysroot' arguments are present.
    """
    compiler_so = list(compiler_so)

    def _strip_flag_and_value(argv, flag):
        # Remove every occurrence of `flag` together with its argument.
        while flag in argv:
            pos = argv.index(flag)
            del argv[pos:pos + 2]

    if not _supports_universal_builds():
        # OSX before 10.4.0, these don't support -arch and -isysroot at
        # all.
        strip_arch = strip_sysroot = True
    else:
        strip_arch = '-arch' in cc_args
        strip_sysroot = '-isysroot' in cc_args

    if strip_arch or 'ARCHFLAGS' in os.environ:
        _strip_flag_and_value(compiler_so, '-arch')

    if 'ARCHFLAGS' in os.environ and not strip_arch:
        # User specified different -arch flags in the environ,
        # see also distutils.sysconfig
        compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()

    if strip_sysroot:
        _strip_flag_and_value(compiler_so, '-isysroot')

    # Check if the SDK that is used during compilation actually exists,
    # the universal build requires the usage of a universal SDK and not all
    # users have that installed by default.
    sysroot = None
    if '-isysroot' in cc_args:
        sysroot = cc_args[cc_args.index('-isysroot') + 1]
    elif '-isysroot' in compiler_so:
        sysroot = compiler_so[compiler_so.index('-isysroot') + 1]

    if sysroot and not os.path.isdir(sysroot):
        from distutils import log
        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
                 sysroot)
        log.warn("Please check your Xcode installation")

    return compiler_so
def customize_config_vars(_config_vars):
    """Customize Python build configuration variables.

    Called internally from sysconfig with a mutable mapping
    containing name/value pairs parsed from the configured
    makefile used to build this interpreter.  Returns
    the mapping updated as needed to reflect the environment
    in which the interpreter is running; in the case of
    a Python from a binary installer, the installed
    environment may be very different from the build
    environment, i.e. different OS levels, different
    built tools, different available CPU architectures.

    This customization is performed whenever
    distutils.sysconfig.get_config_vars() is first
    called.  It may be used in environments where no
    compilers are present, i.e. when installing pure
    Python dists.  Customization of compiler paths
    and detection of unavailable archs is deferred
    until the first extension module build is
    requested (in distutils.sysconfig.customize_compiler).

    Currently called from distutils.sysconfig
    """
    if not _supports_universal_builds():
        # On Mac OS X before 10.4, check if -arch and -isysroot
        # are in CFLAGS or LDFLAGS and remove them if they are.
        # This is needed when building extensions on a 10.3 system
        # using a universal build of python.
        _remove_universal_flags(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    # Remove references to sdks that are not found
    _check_for_unavailable_sdk(_config_vars)

    return _config_vars
def customize_compiler(_config_vars):
    """Customize compiler path and configuration variables.

    This customization is performed when the first
    extension module build is requested
    in distutils.sysconfig.customize_compiler).

    Mutates and returns `_config_vars`.
    """
    # Find a compiler to use for extension module builds
    _find_appropriate_compiler(_config_vars)

    # Remove ppc arch flags if not supported here
    _remove_unsupported_archs(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    return _config_vars
def get_platform_osx(_config_vars, osname, release, machine):
    """Filter values for get_platform().

    Returns an (osname, release, machine) tuple adjusted for macOS:
    the release becomes the deployment target and the machine name
    reflects the architectures a universal build targets.
    """
    # called from get_platform() in sysconfig and distutils.util

    # For our purposes, we'll assume that the system version from
    # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
    # to. This makes the compatibility story a bit more sane because the
    # machine is going to compile and link as if it were
    # MACOSX_DEPLOYMENT_TARGET.
    macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
    macrelease = _get_system_version() or macver
    macver = macver or macrelease

    if macver:
        release = macver
        osname = "macosx"

        # Use the original CFLAGS value, if available, so that we
        # return the same machine type for the platform string.
        # Otherwise, distutils may consider this a cross-compiling
        # case and disallow installs.
        cflags = _config_vars.get(_INITPRE+'CFLAGS',
                                  _config_vars.get('CFLAGS', ''))
        if macrelease:
            try:
                macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
            except ValueError:
                macrelease = (10, 0)
        else:
            # assume no universal support
            macrelease = (10, 0)

        if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
            # The universal build will build fat binaries, but not on
            # systems before 10.4
            machine = 'fat'

            # Raw string for the regex: '\s'/'\S' in a plain literal are
            # invalid escape sequences (SyntaxWarning on modern Pythons).
            archs = re.findall(r'-arch\s+(\S+)', cflags)
            archs = tuple(sorted(set(archs)))

            if len(archs) == 1:
                machine = archs[0]
            elif archs == ('i386', 'ppc'):
                machine = 'fat'
            elif archs == ('i386', 'x86_64'):
                machine = 'intel'
            elif archs == ('i386', 'ppc', 'x86_64'):
                machine = 'fat3'
            elif archs == ('ppc64', 'x86_64'):
                machine = 'fat64'
            elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                machine = 'universal'
            else:
                raise ValueError(
                    "Don't know machine value for archs=%r" % (archs,))

        elif machine == 'i386':
            # On OSX the machine type returned by uname is always the
            # 32-bit variant, even if the executable architecture is
            # the 64-bit variant
            if sys.maxsize >= 2**32:
                machine = 'x86_64'

        elif machine in ('PowerPC', 'Power_Macintosh'):
            # Pick a sane name for the PPC architecture.
            # See 'i386' case
            if sys.maxsize >= 2**32:
                machine = 'ppc64'
            else:
                machine = 'ppc'

    return (osname, release, machine)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| {
"content_hash": "070d6f23517515628f9f814e33642146",
"timestamp": "",
"source": "github",
"line_count": 1512,
"max_line_length": 80,
"avg_line_length": 37.991402116402114,
"alnum_prop": 0.6125724631373709,
"repo_name": "ArcherSys/ArcherSys",
"id": "5ede43327f0661c4d381941737dd1b42f372e2b3",
"size": "57443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/_osx_support.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
__all__ = ('SrdpToolRunner',)
import sys, os, uuid, json, pkg_resources, argparse
from twisted.python import log
from twisted.internet.serialport import SerialPort
from _version import __version__
from srdp import SrdpStreamProtocol, SrdpDatagramProtocol
from eds import SrdpEdsDatabase
from srdptoolprovider import SrdpToolProvider
class SerialPortFix(SerialPort):
    """
    Workaround for the following issue on Windows:

    http://twistedmatrix.com/trac/ticket/1248
    http://stackoverflow.com/a/287293/884770
    """

    def __init__(self, *args, **kw):
        super(SerialPortFix, self).__init__(*args, **kw)
        # Buffer kept for the Windows write-path workaround.
        self._tempDataBuffer = []

    def writeSomeData(self, data):
        # NOTE(review): this reports the whole buffer as written without
        # invoking the base class's write path -- presumably intentional as
        # part of the Windows workaround above, but verify that the data is
        # actually transmitted elsewhere before relying on this class.
        return len(data)
class SrdpToolRunner(object):
    """Command-line driver for srdptool (Python 2 code).

    Parses command-line arguments into a flat ``config`` dict and starts
    the requested SRDP service (serial or UDP transport) on a Twisted
    reactor.
    """

    def argparse(self):
        """Parse command-line arguments and return the run configuration.

        Returns a dict with keys: debug, transport, mode, modearg, delay,
        edsdirs, write, host, port, baudrate, linelength.  Raises an
        Exception for an invalid transport or baudrate; argparse exits the
        process on malformed arguments.
        """
        ## parse command line args
        ##
        parser = argparse.ArgumentParser(prog = "srdptool",
                                         description = "SRDP Tool v%s" % __version__)

        group0 = parser.add_argument_group(title = 'SRDP transport and EDS directories')
        group0.add_argument("-t",
                            "--transport",
                            nargs = 2,
                            metavar = ('<transport>', '<transport parameters>'),
                            action = "store",
                            help = "SRDP transport to use. Eg. 'serial /dev/ttxACM0:19200' or 'serial com3:115200'")
        group0.add_argument("-e",
                            "--eds",
                            type = str,
                            action = "append",
                            metavar = "<directory path>",
                            help = "Path to EDS directory.")

        # Exactly one run mode must be chosen; argparse enforces exclusivity.
        group1dummy = parser.add_argument_group(title = 'Run mode (one of the following)')
        group1 = group1dummy.add_mutually_exclusive_group(required = True)
        group1.add_argument("--check",
                            help = "Load and check the EDS database.",
                            action = "store_true")
        group1.add_argument("--list",
                            help = "List the devices currently connected to the adapter.",
                            action = "store_true")
        group1.add_argument("--show",
                            help = "Show information for given device.",
                            metavar = "<device>",
                            type = int,
                            action = "store")
        group1.add_argument("--read",
                            help = "Read current register values for given device (for all register that allow 'read' access).",
                            metavar = "<device>",
                            type = int,
                            action = "store")
        group1.add_argument("--monitor",
                            help = "Monitor the given device for notify events.",
                            metavar = "<device>",
                            type = int,
                            action = "store")
        group1.add_argument("--uuid",
                            type = int,
                            help = "Generate given number of UUIDs.",
                            metavar = "<count>",
                            action = "store")

        group2 = parser.add_argument_group(title = 'Register writing (optional)')
        group2.add_argument("--write",
                            action = "append",
                            metavar = ('<register>', '<value>'),
                            nargs = 2,
                            help = "Write register values before main action. Register can be specified either by index or path.")

        group3 = parser.add_argument_group(title = 'Other options')
        group3.add_argument("--delay",
                            help = "Delay to wait for (serial) device to get ready (seconds|float).",
                            type = float,
                            default = 1.0,
                            action = "store")
        group3.add_argument("--linelength",
                            type = int,
                            default = 120,
                            metavar = "<line length>",
                            help = "Truncate display line length to given number of chars.")
        group3.add_argument("-d",
                            "--debug",
                            help = "Enable debug output.",
                            action = "store_true")

        args = parser.parse_args()

        ## debug output
        ##
        debug = args.debug
        if debug:
            log.startLogging(sys.stdout)

        ## delay main action
        ##
        delay = args.delay

        ## load EDS files from these directories
        ##
        edsDirectories = []
        if args.eds:
            for e in args.eds:
                edsDirectories.append(os.path.abspath(e))
        # Always include the EDS files bundled with the srdp package itself.
        edsDirectories.append(pkg_resources.resource_filename("srdp", "eds"))

        ## truncate line length in shell output
        ##
        linelength = int(args.linelength)

        ## write these values to register before main action
        ##
        write = None
        if args.write:
            write = []
            for reg, val in args.write:
                # A register may be given by numeric index or string path.
                try:
                    reg = int(reg)
                except:
                    try:
                        reg = str(reg)
                    except Exception, e:
                        raise e
                # Values are parsed as JSON literals.
                try:
                    val = json.loads(val)
                except Exception, e:
                    raise e
                write.append([reg, val])

        ## SRDP transport
        ##
        if args.transport and len(args.transport) > 0:
            transport = args.transport[0].strip().lower()
        else:
            transport = None
        host = None
        port = None
        baudrate = None
        if transport == 'serial':
            # Parameter format: "<port>[:<baudrate>]", e.g. "com3:115200".
            s = args.transport[1].split(':')
            port = s[0].strip()
            try:
                port = int(port)
            except:
                # on RaspberryPi, Serial-over-USB appears as /dev/ttyACM0
                pass
            baudrate = 115200
            if len(s) > 1:
                baudrate = int(s[1])
            if baudrate not in [300, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400, 57600, 115200, 230400]:
                raise Exception("invalid baudrate")
        elif transport == 'udp':
            # Parameter format: "<host>[:<port>]"; default port is 1910.
            s = args.transport[1].split(':')
            host = s[0].strip().lower()
            port = 1910
            if len(s) > 1:
                port = int(s[1])
        elif transport is None:
            pass
        else:
            raise Exception("invalid transport %s" % transport)

        ## run mode
        ##
        mode = None
        modeArg = None
        if args.uuid:
            mode = 'uuid'
            modeArg = int(args.uuid)
        elif args.check:
            mode = 'check'
        elif args.list:
            mode = 'list'
        elif args.show:
            mode = 'show'
            modeArg = int(args.show)
        elif args.read:
            mode = 'read'
            modeArg = int(args.read)
        elif args.monitor:
            mode = 'monitor'
            modeArg = int(args.monitor)
        else:
            # Unreachable in practice: argparse requires one of the modes.
            raise Exception("logic error")

        config = {}
        config['debug'] = debug
        config['transport'] = transport
        config['mode'] = mode
        config['modearg'] = modeArg
        config['delay'] = delay
        config['edsdirs'] = edsDirectories
        config['write'] = write
        config['host'] = host
        config['port'] = port
        config['baudrate'] = baudrate
        config['linelength'] = linelength
        return config

    def startService(self, config, reactor):
        """Run the mode selected in ``config`` on the given reactor.

        Returns False for one-shot modes that finish immediately ('uuid',
        'check') and True when a long-running transport was started.
        """
        ## do it ..
        ##
        if config['mode'] == 'uuid':
            def splitlen(seq, length):
                ## Splits a string into fixed size parts.
                return [seq[i:i+length] for i in range(0, len(seq), length)]
            for i in xrange(config['modearg']):
                u = uuid.uuid4()
                print
                print "UUID :", u
                print "HEX :", u.hex
                print "C/C++ :", '{' + ', '.join(['0x' + x for x in splitlen(u.hex, 2)]) + '}'
            return False
        elif config['mode'] in ['check', 'list', 'show', 'read', 'monitor']:
            # All these modes need the EDS database loaded first.
            edsDb = SrdpEdsDatabase(debug = config['debug'])
            total = 0
            for d in config['edsdirs']:
                l = edsDb.loadFromDir(d)
                total += l
                print "Ok: loaded and checked %d EDS files from %s" % (l, d)
            l = edsDb.check()
            print "EDS database with %d objects initiated." % l
            if config['mode'] == 'check':
                return False

            ## complex modes ..
            ##
            srdptool = SrdpToolProvider(config = config, edsDb = edsDb, debug = config['debug'])
            if config['transport'] == 'serial':
                print "SRDP-over-Serial - connecting to %s at %d baud .." % (config['port'], config['baudrate'])
                protocol = SrdpStreamProtocol(provider = srdptool, debug = config['debug'])
                serialPort = SerialPortFix(protocol, config['port'], reactor, baudrate = config['baudrate'])
            elif config['transport'] == 'udp':
                print "SRDP-over-UDP - connecting to %s:%d .." % (config['host'], config['port'])
                protocol = SrdpDatagramProtocol(provider = srdptool, addr = (config['host'], config['port']), debug = config['debug'])
                reactor.listenUDP(config['port'], protocol)
            else:
                raise Exception("logic error")
            return True
        else:
            raise Exception("logic error")
| {
"content_hash": "8ebc05f1519935d87657374fcea9e404",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 130,
"avg_line_length": 31.728813559322035,
"alnum_prop": 0.496474358974359,
"repo_name": "oberstet/SRDP",
"id": "92111365abd331ce83e8c91ccc8790e2c243a55f",
"size": "10126",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/srdp/srdp/srdptoolrunner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "15411"
},
{
"name": "C",
"bytes": "37699"
},
{
"name": "C++",
"bytes": "9073"
},
{
"name": "Python",
"bytes": "67120"
}
],
"symlink_target": ""
} |
from django.db import models
import datetime
class User(models.Model):
    """Application user account (custom, not django.contrib.auth)."""
    username = models.CharField(max_length=50)
    # NOTE(review): presumably stores a password hash, max_length=200 --
    # verify the hashing is done by the caller.
    password = models.CharField(max_length=200)
    nickname = models.CharField(max_length=10)
    email = models.EmailField()
    phone = models.CharField(max_length=20)
    # Pass the callable, not its result: `datetime.datetime.now()` would be
    # evaluated once at import time, stamping every new row with the moment
    # the process started instead of the row's actual creation time.
    create_time = models.DateTimeField(default=datetime.datetime.now)
    last_login_time = models.DateTimeField(null=True)
    is_active = models.BooleanField(default=True)

    def __unicode__(self):
        return self.username

    def is_authenticated(self):
        """Mirror Django's auth User API: real users are always authenticated."""
        return True
class PermissionGroup(models.Model):
    """Named grouping of permissions, associated with a module."""
    name = models.CharField(max_length=200)
    module = models.CharField(max_length=50)

    def __unicode__(self):
        return self.name
class Permission(models.Model):
    """A single permitted action, belonging to a PermissionGroup."""
    name = models.CharField(max_length=200)
    action = models.CharField(max_length=200)
    # NOTE(review): no on_delete argument (mandatory from Django 2.0; older
    # versions default to CASCADE) -- confirm the targeted Django version.
    action_group = models.ForeignKey(PermissionGroup)

    def __unicode__(self):
        return self.name
class Menu(models.Model):
    """Navigation menu entry, arranged as an adjacency-list tree."""
    name = models.CharField(max_length=50)
    description = models.CharField(max_length=255)
    # id of the parent Menu row (a plain integer, not a ForeignKey).
    parent_menu = models.IntegerField()
    url = models.CharField(max_length=255)
    # presumably ordering among siblings -- verify against the views.
    sort = models.IntegerField()
    # NOTE(review): "is_leaft" looks like a typo for "is_leaf"; renaming
    # would require a schema migration, so it is kept as-is.
    is_leaft = models.BooleanField(default=False)
    is_available = models.BooleanField(default=True)

    def __unicode__(self):
        return self.name
class Group(models.Model):
    """User group carrying its members' permissions and menu access."""
    name = models.CharField(max_length=50)
    description = models.CharField(max_length=255)
    create_username = models.CharField(max_length=50)
    # Pass the callable, not its result: `datetime.datetime.now()` would be
    # evaluated once at import time, so every row would get the process
    # start time rather than its actual creation time.
    create_time = models.DateTimeField(default=datetime.datetime.now)
    users = models.ManyToManyField(User)
    permission = models.ManyToManyField(Permission)
    menu = models.ManyToManyField(Menu)
    is_available = models.BooleanField(default=True)

    def __unicode__(self):
        return self.name
class AnonymousUser(object):
    """Stand-in user object for requests without an authenticated session.

    Mirrors the identity attributes of `User` with empty values.
    """

    # No database identity.
    id = None
    pk = None
    username = ''

    def __init__(self):
        # Nothing to initialize; attributes live on the class.
        pass

    def is_authenticated(self):
        """Always False -- an anonymous visitor is never authenticated."""
        return False
| {
"content_hash": "90e782f6dc752960786cffd7af3a7930",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 71,
"avg_line_length": 27.2,
"alnum_prop": 0.6901960784313725,
"repo_name": "GavinZhuLei/GavinsDjango",
"id": "e9e0b9d58357cae021407896d35af6643e7a008d",
"size": "2086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/gauth/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1627563"
},
{
"name": "HTML",
"bytes": "116063"
},
{
"name": "JavaScript",
"bytes": "1646538"
},
{
"name": "Python",
"bytes": "37419"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the PageConversation and PageSettings tables."""

    dependencies = [
        ('action', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='PageConversation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('conversation_replied', models.BooleanField(default=False)),
                ('reply_message', models.TextField(blank=True, null=True)),
                # Raw conversation payload stored as Postgres JSONB.
                ('conversation', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='PageSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pageid', models.TextField(blank=True, null=True)),
                ('access_token', models.TextField(blank=True, null=True)),
            ],
        ),
    ]
| {
"content_hash": "92811e3fcdc210e03147cac06bf3903d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 114,
"avg_line_length": 36.064516129032256,
"alnum_prop": 0.5858676207513417,
"repo_name": "bhanduroshan/fbstats-docker",
"id": "f31dc0019b4ab86fa0ad18f4338f52b43a65d7d5",
"size": "1191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "action/migrations/0002_pageconversation_pagesettings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "125498"
},
{
"name": "HTML",
"bytes": "52729"
},
{
"name": "JavaScript",
"bytes": "1787"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Python",
"bytes": "263690"
},
{
"name": "Shell",
"bytes": "8068"
}
],
"symlink_target": ""
} |
"""Tests for data_provider."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.cyclegan import data_provider
mock = tf.test.mock
class DataProviderTest(tf.test.TestCase):
    """Tests for the cyclegan `data_provider` input pipeline.

    Written for TF1 graph mode: the dataset tests bail out under eager
    execution because `make_initializable_iterator` is graph-only.
    """

    def setUp(self):
        super(DataProviderTest, self).setUp()
        # Directory holding the small test JPEGs shipped with the example.
        self.testdata_dir = os.path.join(
            flags.FLAGS.test_srcdir,
            'tensorflow_gan/examples/cyclegan/testdata')

    def test_normalize_image(self):
        # normalize_image should cast to float32, preserve the shape, and
        # map pixel values into [-1, 1].
        image = tf.random.uniform(shape=(8, 8, 3), maxval=256, dtype=tf.int32)
        rescaled_image = data_provider.normalize_image(image)
        self.assertEqual(tf.float32, rescaled_image.dtype)
        self.assertListEqual(image.shape.as_list(), rescaled_image.shape.as_list())
        with self.cached_session() as sess:
            rescaled_image_out = sess.run(rescaled_image)
            self.assertTrue(np.all(np.abs(rescaled_image_out) <= 1.0))

    def test_sample_patch(self):
        # _sample_patch should produce the requested spatial size with 3
        # channels, for both smaller and larger patches and for a
        # single-channel input.
        image = tf.zeros(shape=(8, 8, 3))
        patch1 = data_provider._sample_patch(image, 7)
        patch2 = data_provider._sample_patch(image, 10)
        image = tf.zeros(shape=(8, 8, 1))
        patch3 = data_provider._sample_patch(image, 10)
        with self.cached_session() as sess:
            self.assertTupleEqual((7, 7, 3), sess.run(patch1).shape)
            self.assertTupleEqual((10, 10, 3), sess.run(patch2).shape)
            self.assertTupleEqual((10, 10, 3), sess.run(patch3).shape)

    def test_custom_dataset_provider(self):
        # _provide_custom_dataset should read the JPEG files as uint8
        # tensors with a trailing channel dimension of 3.
        if tf.executing_eagerly():
            # dataset.make_initializable_iterator is not supported when eager
            # execution is enabled.
            return
        file_pattern = os.path.join(self.testdata_dir, '*.jpg')
        images_ds = data_provider._provide_custom_dataset(file_pattern)
        self.assertEqual(tf.uint8, images_ds.output_types)
        iterator = tf.data.make_initializable_iterator(images_ds)
        with self.cached_session() as sess:
            sess.run(tf.local_variables_initializer())
            sess.run(iterator.initializer)
            images_out = sess.run(iterator.get_next())
            self.assertEqual(3, images_out.shape[-1])

    def test_custom_datasets_provider(self):
        # provide_custom_datasets should yield one batched float32 dataset
        # per file pattern, with values normalized into [-1, 1].
        if tf.executing_eagerly():
            # dataset.make_initializable_iterator is not supported when eager
            # execution is enabled.
            return
        file_pattern = os.path.join(self.testdata_dir, '*.jpg')
        batch_size = 3
        patch_size = 8
        images_ds_list = data_provider.provide_custom_datasets(
            batch_size=batch_size,
            image_file_patterns=[file_pattern, file_pattern],
            patch_size=patch_size)
        for images_ds in images_ds_list:
            self.assertListEqual([None, patch_size, patch_size, 3],
                                 images_ds.output_shapes.as_list())
            self.assertEqual(tf.float32, images_ds.output_types)
        iterators = [tf.data.make_initializable_iterator(x) for x in images_ds_list]
        initialiers = [x.initializer for x in iterators]
        img_tensors = [x.get_next() for x in iterators]
        with self.cached_session() as sess:
            sess.run(tf.local_variables_initializer())
            sess.run(initialiers)
            images_out_list = sess.run(img_tensors)
            for images_out in images_out_list:
                self.assertTupleEqual((batch_size, patch_size, patch_size, 3),
                                      images_out.shape)
                self.assertTrue(np.all(np.abs(images_out) <= 1.0))

    def test_custom_data_provider(self):
        # provide_custom_data returns batched tensors directly (no Dataset
        # objects), one per file pattern.
        if tf.executing_eagerly():
            # dataset.make_initializable_iterator is not supported when eager
            # execution is enabled.
            return
        file_pattern = os.path.join(self.testdata_dir, '*.jpg')
        batch_size = 3
        patch_size = 8
        images_list = data_provider.provide_custom_data(
            batch_size=batch_size,
            image_file_patterns=[file_pattern, file_pattern],
            patch_size=patch_size)
        for images in images_list:
            self.assertListEqual([batch_size, patch_size, patch_size, 3],
                                 images.shape.as_list())
            self.assertEqual(tf.float32, images.dtype)
        with self.cached_session() as sess:
            sess.run(tf.local_variables_initializer())
            sess.run(tf.tables_initializer())
            images_out_list = sess.run(images_list)
            for images_out in images_out_list:
                self.assertTupleEqual((batch_size, patch_size, patch_size, 3),
                                      images_out.shape)
                self.assertTrue(np.all(np.abs(images_out) <= 1.0))
if __name__ == '__main__':
    # Run all test cases in this module.
    tf.test.main()
| {
"content_hash": "c9f866e23b2d945bf9d0e4458e450b55",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 80,
"avg_line_length": 37.55833333333333,
"alnum_prop": 0.664299977812292,
"repo_name": "tensorflow/gan",
"id": "0174cda8e31a8b38c06f0f5d6a6184c71bd34404",
"size": "5114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_gan/examples/cyclegan/data_provider_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1515604"
},
{
"name": "Python",
"bytes": "1263660"
},
{
"name": "Shell",
"bytes": "8407"
}
],
"symlink_target": ""
} |
import os
import sys
from pprint import pprint

# Make the repository's own `python/` package importable when this example
# is run straight from a source checkout (three directory levels up).
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')

import ccxt  # noqa: E402

print('CCXT Version:', ccxt.__version__)
# Must read before your start:
#
# - https://github.com/ccxt/ccxt/wiki/Manual
# - https://github.com/ccxt/ccxt/wiki/Manual#implicit-api-methods
# - https://github.com/ccxt/ccxt/wiki/Manual#unified-api
#
# In short, Binance's API is structured as follows and you should understand
# the meaning and the difference between ISOLATED vs CROSSED margin mode and
# the difference between Hedged positions vs One-way positions.
#
# - wapi: funding for withdrawals and deposits (wapi)
# - api: spot (api)
# - sapi: spot margin
# - CROSSED margin mode
# - Hedged positions
# - One-way positions
# - ISOLATED margin mode
# - Hedged positions
# - One-way positions
# - fapi: swap/perpetual futures margin
# - CROSSED margin mode
# - Hedged positions
# - One-way positions
# - ISOLATED margin mode
# - Hedged positions
# - One-way positions
# - dapi: classic delivery futures margin
# - CROSSED margin mode
# - Hedged positions
# - One-way positions
# - ISOLATED margin mode
# - Hedged positions
# - One-way positions
#
# You should pick the following:
#
# 1. which API you want to trade (fapi, i believe)
# 2. which specific margin mode you want (CROSSED or ISOLATED)
# 3. which specific position mode you want (Hedged or One-way)
#
# Differences in margin modes:
#
# - CROSSED margin mode = you have one futures-margin account for all your positions,
# if some position requires too much margin, your entire account is affected,
# leaving less margin for the other positions,
# thus you share the same margin _"across"_ all your positions
#
# - ISOLATED margin mode = you have separate futures-margin for each of your positions,
# if some position runs out of margin the other positions are not affected,
# thus your positions are _"isolated"_ from one another
#
# Difference in position modes:
#
# - One-way position mode - when you're in this mode
# there's no such things as LONG or SHORT positions.
# You just buy or sell a number of contracts, and
# if the price goes down, your PnL goes negative,
# if the price goes up, your PnL is positive.
# Thus, the position operates `BOTH` ways, both long and short at the same time,
# the notion of "long" and "short" is abstracted away from you,
# so there's only one way the position can go and that way is called "BOTH".
#
# - Hedge mode - you either enter a `LONG` position or a `SHORT` position and
# your PnL calculation rules depend on that
# so there's a number of ways a position can go
#
# Which specific mode of trading (margin mode + position mode) do you want?
def table(values):
    """Render a list of dicts (or indexable rows) as an aligned text table.

    Column widths are taken from the widest stringified cell in each
    column; cells are left-aligned and joined with ' | '.
    """
    head = values[0]
    if isinstance(head, dict):
        columns = list(head.keys())
    else:
        columns = range(0, len(head))
    # Widest cell per column determines that column's width.
    col_widths = [max(len(str(row[c])) for row in values) for c in columns]
    row_fmt = ' | '.join('{:<' + str(w) + '}' for w in col_widths)
    rendered = []
    for row in values:
        rendered.append(row_fmt.format(*[str(row[c]) for c in columns]))
    return "\n".join(rendered)
# Route unified API calls to Binance's USDT-margined futures (fapi) API.
exchange = ccxt.binance({
    'apiKey': 'YOUR_API_KEY',
    'secret': 'YOUR_SECRET',
    'options': {
        'defaultType': 'future',
    },
})

markets = exchange.load_markets()

symbol = 'BTC/USDT'  # YOUR SYMBOL HERE
market = exchange.market(symbol)

exchange.verbose = True  # UNCOMMENT THIS AFTER LOADING THE MARKETS FOR DEBUGGING

print('----------------------------------------------------------------------')

print('Fetching your balance:')
response = exchange.fetch_balance()
pprint(response['total'])  # make sure you have enough futures margin...
# pprint(response['info'])  # more details

print('----------------------------------------------------------------------')

# https://binance-docs.github.io/apidocs/futures/en/#position-information-v2-user_data
print('Getting your positions:')
response = exchange.fapiPrivateV2_get_positionrisk()
print(table(response))

print('----------------------------------------------------------------------')

# https://binance-docs.github.io/apidocs/futures/en/#change-position-mode-trade
print('Getting your current position mode (One-way or Hedge Mode):')
response = exchange.fapiPrivate_get_positionside_dual()
# dualSidePosition is True in Hedge Mode, False in One-way Mode.
if response['dualSidePosition']:
    print('You are in Hedge Mode')
else:
    print('You are in One-way Mode')

print('----------------------------------------------------------------------')
# print('Setting your position mode to One-way:')
# response = exchange.fapiPrivate_post_positionside_dual({
# 'dualSidePosition': False,
# })
# print(response)
# print('Setting your positions to Hedge mode:')
# response = exchange.fapiPrivate_post_positionside_dual({
# 'dualSidePosition': True,
# })
# print(response)
# print('----------------------------------------------------------------------')
# # https://binance-docs.github.io/apidocs/futures/en/#change-margin-type-trade
# print('Changing your', symbol, 'position margin mode to CROSSED:')
# response = exchange.fapiPrivate_post_margintype({
# 'symbol': market['id'],
# 'marginType': 'CROSSED',
# })
# print(response)
# print('Changing your', symbol, 'position margin mode to ISOLATED:')
# response = exchange.fapiPrivate_post_margintype({
# 'symbol': market['id'],
# 'marginType': 'ISOLATED',
# })
# print(response)
# print('----------------------------------------------------------------------')
# # https://binance-docs.github.io/apidocs/spot/en/#new-future-account-transfer-futures
# code = 'USDT'
# amount = 123.45
# currency = exchange.currency(code)
# print('Moving', code, 'funds from your spot account to your futures account:')
# response = exchange.sapi_post_futures_transfer({
# 'asset': currency['id'],
# 'amount': exchange.currency_to_precision(code, amount),
# # 1: transfer from spot account to USDT-Ⓜ futures account.
# # 2: transfer from USDT-Ⓜ futures account to spot account.
# # 3: transfer from spot account to COIN-Ⓜ futures account.
# # 4: transfer from COIN-Ⓜ futures account to spot account.
# 'type': 1,
# })
# print('----------------------------------------------------------------------')
# # for ISOLATED positions only
# print('Modifying your ISOLATED', symbol, 'position margin:')
# response = exchange.fapiPrivate_post_positionmargin({
# 'symbol': market['id'],
# 'amount': 123.45, # ←-------------- YOUR AMOUNT HERE
# 'positionSide': 'BOTH', # use BOTH for One-way positions, LONG or SHORT for Hedge Mode
# 'type': 1, # 1 = add position margin, 2 = reduce position margin
# })
# print('----------------------------------------------------------------------')
| {
"content_hash": "ca66e08e2f435a655ec8d45ab23aa066",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 93,
"avg_line_length": 36.031088082901555,
"alnum_prop": 0.6151855047454702,
"repo_name": "ccxt/ccxt",
"id": "4957822be46ae194367ed38e9fbeb3a4e0ece542",
"size": "6990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/py/binance-futures-margin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "246"
},
{
"name": "JavaScript",
"bytes": "11619228"
},
{
"name": "PHP",
"bytes": "10272973"
},
{
"name": "Python",
"bytes": "9037496"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
} |
"""
This module imitates a real module, providing standard syntax
like from `plumbum.colors` and from `plumbum.colors.bg` to work alongside
all the standard syntax for colors.
"""
import atexit
import sys
from plumbum.colorlib import ansicolors, main
# Bind the reset action before the module-replacement trick below, so the
# atexit hook does not depend on this module's own (replaced) namespace.
_reset = ansicolors.reset.now

if __name__ == "__main__":
    main()
else:  # Don't register an exit if this is called using -m!
    # Reset terminal colors when the interpreter exits.
    atexit.register(_reset)

# Expose `plumbum.colors.fg` / `plumbum.colors.bg` as importable submodules
# and replace this module itself with the ansicolors styler object so that
# `from plumbum import colors` yields the styler directly.
sys.modules[__name__ + ".fg"] = ansicolors.fg
sys.modules[__name__ + ".bg"] = ansicolors.bg
sys.modules[__name__] = ansicolors  # type: ignore[assignment]
| {
"content_hash": "371fc7499a98bf42c734a4fd26509abf",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 27,
"alnum_prop": 0.7001763668430335,
"repo_name": "tomerfiliba/plumbum",
"id": "07ec3f033add35d88ed2f2d77121e8c96af2f613",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plumbum/colors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "456351"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the many-to-many `sites` field to the display Template model."""

    dependencies = [
        ('sites', '0001_initial'),
        ('display', '0002_auto_20150505_0038'),
    ]

    operations = [
        migrations.AddField(
            model_name='template',
            name='sites',
            field=models.ManyToManyField(to='sites.Site'),
        ),
    ]
| {
"content_hash": "6e86662c4d1a17432d68c4582ba350b7",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 22.105263157894736,
"alnum_prop": 0.580952380952381,
"repo_name": "liddiard/skry",
"id": "6ebb875fb7f6b3b0c2846dca86d70563b06c23c1",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "display/migrations/0003_template_sites.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139122"
}
],
"symlink_target": ""
} |
"""Generates a sysroot tarball for building a specific package.
Meant for use after setup_board and build_packages have been run.
"""
import os
from chromite.buildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import commandline
from chromite.lib import osutils
from chromite.lib import sudo
# Output tarball filename template; '%(package)s' is the package atom with
# the category separator replaced by '_'.
DEFAULT_NAME = 'sysroot_%(package)s.tar.xz'
# Separator between category and package in a portage atom, e.g. 'sys-apps/foo'.
PACKAGE_SEPARATOR = '/'
# NOTE(review): not referenced in this chunk -- presumably the name of the
# generated sysroot directory used later in the file; confirm.
SYSROOT = 'sysroot'
def ParseCommandLine(argv):
  """Parse *argv*, applying only checks that need no environment access.

  Args:
    argv: Command line arguments (without the program name).

  Returns:
    The parsed options; ``out_file`` is defaulted from DEFAULT_NAME when
    the caller did not supply one.
  """
  parser = commandline.ArgumentParser(description=__doc__)
  parser.add_argument('--board', required=True,
                      help='The board to generate the sysroot for.')
  parser.add_argument('--package', required=True,
                      help='The package to generate the sysroot for.')
  parser.add_argument('--out-dir', type=osutils.ExpandPath, required=True,
                      help='Directory to place the generated tarball.')
  parser.add_argument('--out-file',
                      help='The name to give to the tarball. Defaults to %r.'
                           % DEFAULT_NAME)
  opts = parser.parse_args(argv)
  if not opts.out_file:
    # Derive a filesystem-safe default name from the package atom.
    sanitized = opts.package.replace(PACKAGE_SEPARATOR, '_')
    opts.out_file = DEFAULT_NAME % {'package': sanitized}
  return opts
class GenerateSysroot(object):
  """Wrapper for generation functionality."""
  # Path to the parallel_emerge helper shipped with chromite.
  PARALLEL_EMERGE = os.path.join(constants.CHROMITE_BIN_DIR, 'parallel_emerge')
  def __init__(self, sysroot, options):
    """Initialize
    Arguments:
      sysroot: Path to sysroot.
      options: Parsed options.
    """
    self.sysroot = sysroot
    self.options = options
  def _InstallToolchain(self):
    # Install the cross toolchain into the sysroot without configuring it.
    cros_build_lib.RunCommand(
        [os.path.join(constants.CROSUTILS_DIR, 'install_toolchain'),
         '--noconfigure', '--board_root', self.sysroot, '--board',
         self.options.board])
  def _InstallKernelHeaders(self):
    # Pull kernel headers into the sysroot, preferring binary packages
    # (--getbinpkg/--usepkg) and only runtime deps (--root-deps=rdeps).
    cros_build_lib.SudoRunCommand(
        [self.PARALLEL_EMERGE, '--board=%s' % self.options.board,
         '--root-deps=rdeps', '--getbinpkg', '--usepkg',
         '--root=%s' % self.sysroot, 'sys-kernel/linux-headers'])
  def _InstallBuildDependencies(self):
    # Install only the dependencies of the target package (--onlydeps),
    # excluding any prebuilt of the package itself via --usepkg-exclude.
    cros_build_lib.SudoRunCommand(
        [self.PARALLEL_EMERGE, '--board=%s' % self.options.board,
         '--root=%s' % self.sysroot, '--usepkg', '--onlydeps',
         '--usepkg-exclude=%s' % self.options.package, self.options.package])
  def _CreateTarball(self):
    # Pack the populated sysroot into the requested output tarball.
    target = os.path.join(self.options.out_dir, self.options.out_file)
    cros_build_lib.CreateTarball(target, self.sysroot, sudo=True)
  def Perform(self):
    """Generate the sysroot."""
    self._InstallToolchain()
    self._InstallKernelHeaders()
    self._InstallBuildDependencies()
    self._CreateTarball()
def FinishParsing(options):
  """Validate parsed options against the local filesystem.

  Dies when the output tarball already exists, or when --out-dir does not
  name an existing directory.
  """
  tarball = os.path.join(options.out_dir, options.out_file)
  if os.path.exists(tarball):
    cros_build_lib.Die('Output file %r already exists.' % tarball)
  if not os.path.isdir(options.out_dir):
    cros_build_lib.Die(
        'Non-existent directory %r specified for --out-dir' % options.out_dir)
def main(argv):
  """Script entry point: parse args, build the sysroot, emit the tarball."""
  options = ParseCommandLine(argv)
  FinishParsing(options)
  cros_build_lib.AssertInsideChroot()
  # Keep sudo alive for the duration; the temp dir needs sudo_rm because the
  # emerge steps create root-owned files inside it.
  with sudo.SudoKeepAlive(ttyless_sudo=False):
    with osutils.TempDir(set_global=True, sudo_rm=True) as tempdir:
      sysroot = os.path.join(tempdir, SYSROOT)
      os.mkdir(sysroot)
      GenerateSysroot(sysroot, options).Perform()
| {
"content_hash": "76d3b66ea62dbec6ed9c17ceca79c224",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 32.953271028037385,
"alnum_prop": 0.6695972773681225,
"repo_name": "windyuuy/opera",
"id": "9a38c48f82ec7954f82e6bfb9c0466babdbffd32",
"size": "3714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/src/third_party/chromite/scripts/cros_generate_sysroot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "51642"
},
{
"name": "Batchfile",
"bytes": "35942"
},
{
"name": "C",
"bytes": "4303018"
},
{
"name": "C#",
"bytes": "35203"
},
{
"name": "C++",
"bytes": "207333360"
},
{
"name": "CMake",
"bytes": "25089"
},
{
"name": "CSS",
"bytes": "681256"
},
{
"name": "Dart",
"bytes": "24294"
},
{
"name": "Emacs Lisp",
"bytes": "25534"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "10400943"
},
{
"name": "IDL",
"bytes": "836"
},
{
"name": "Java",
"bytes": "2821184"
},
{
"name": "JavaScript",
"bytes": "14563996"
},
{
"name": "Lua",
"bytes": "13749"
},
{
"name": "Makefile",
"bytes": "55521"
},
{
"name": "Objective-C",
"bytes": "1211523"
},
{
"name": "Objective-C++",
"bytes": "6221908"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "Perl",
"bytes": "82949"
},
{
"name": "Protocol Buffer",
"bytes": "280464"
},
{
"name": "Python",
"bytes": "12627773"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "894814"
},
{
"name": "VimL",
"bytes": "4953"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import math
from op_test import OpTest
def quantize_max_abs(x, num_bits):
    """Symmetrically quantize *x* using max-abs scaling.

    Args:
        x: array-like of floats.
        num_bits: bit width of the quantized representation.

    Returns:
        (y, scale): y is x mapped onto [-(2**num_bits - 1), 2**num_bits - 1]
        and rounded to integers (kept as floats); scale is max(|x|).
    """
    # Renamed from `range` to avoid shadowing the builtin.
    qrange = math.pow(2, num_bits) - 1
    scale = np.max(np.abs(x).flatten())
    y = np.round(x / scale * qrange)
    return y, scale
def dequantize_max_abs(x, num_bits, scale):
    """Invert quantize_max_abs: map quantized values back to floats.

    Args:
        x: quantized values (array-like or scalar).
        num_bits: bit width used during quantization.
        scale: the max-abs scale returned by quantize_max_abs.

    Returns:
        x * scale / (2**num_bits - 1).
    """
    # Renamed from `range` to avoid shadowing the builtin.
    qrange = math.pow(2, num_bits) - 1
    y = (scale / qrange) * x
    return y
class TestFakeDequantizeMaxAbsOp(OpTest):
    """Checks fake_dequantize_max_abs output against the NumPy reference."""
    def set_args(self):
        # Bit width under test; subclasses override this hook.
        self.num_bits = 8
    def setUp(self):
        self.set_args()
        self.op_type = "fake_dequantize_max_abs"
        x = np.random.randn(31, 65).astype("float32")
        yq, scale = quantize_max_abs(x, self.num_bits)
        # Removed a Python-2-only debug `print` statement that broke the
        # file under Python 3 and polluted test output.
        ydq = dequantize_max_abs(yq, self.num_bits, scale)
        self.inputs = {'X': yq}
        self.attrs = {'num_bits': self.num_bits, 'scale': float(scale)}
        self.outputs = {'Out': ydq}
    def test_check_output(self):
        self.check_output()
class TestFakeDequantizeMaxAbsOp5Bits(TestFakeDequantizeMaxAbsOp):
    """5-bit variant of the dequantize test.

    Fixed to subclass TestFakeDequantizeMaxAbsOp instead of OpTest: with
    OpTest as the base, set_args() was never called and no setUp/test ran,
    so the 5-bit path was silently untested.
    """
    def set_args(self):
        self.num_bits = 5
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "e1fbad22f3c11bf9459155553e770198",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 71,
"avg_line_length": 24.08695652173913,
"alnum_prop": 0.5992779783393501,
"repo_name": "Canpio/Paddle",
"id": "281068e945e76a42635868d19573498f79fde1f3",
"size": "1721",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "274629"
},
{
"name": "C++",
"bytes": "4761657"
},
{
"name": "CMake",
"bytes": "209462"
},
{
"name": "CSS",
"bytes": "21730"
},
{
"name": "Cuda",
"bytes": "738162"
},
{
"name": "Go",
"bytes": "99765"
},
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "JavaScript",
"bytes": "1025"
},
{
"name": "Perl",
"bytes": "11452"
},
{
"name": "Protocol Buffer",
"bytes": "54402"
},
{
"name": "Python",
"bytes": "1526791"
},
{
"name": "Shell",
"bytes": "136472"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1SecurityContext(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type string; consumed by the
    # generated (de)serialization machinery and by to_dict().
    openapi_types = {
        'allow_privilege_escalation': 'bool',
        'capabilities': 'V1Capabilities',
        'privileged': 'bool',
        'proc_mount': 'str',
        'read_only_root_filesystem': 'bool',
        'run_as_group': 'int',
        'run_as_non_root': 'bool',
        'run_as_user': 'int',
        'se_linux_options': 'V1SELinuxOptions',
        'seccomp_profile': 'V1SeccompProfile',
        'windows_options': 'V1WindowsSecurityContextOptions'
    }
    # Maps python attribute name -> JSON field name in the API payload.
    attribute_map = {
        'allow_privilege_escalation': 'allowPrivilegeEscalation',
        'capabilities': 'capabilities',
        'privileged': 'privileged',
        'proc_mount': 'procMount',
        'read_only_root_filesystem': 'readOnlyRootFilesystem',
        'run_as_group': 'runAsGroup',
        'run_as_non_root': 'runAsNonRoot',
        'run_as_user': 'runAsUser',
        'se_linux_options': 'seLinuxOptions',
        'seccomp_profile': 'seccompProfile',
        'windows_options': 'windowsOptions'
    }
    def __init__(self, allow_privilege_escalation=None, capabilities=None, privileged=None, proc_mount=None, read_only_root_filesystem=None, run_as_group=None, run_as_non_root=None, run_as_user=None, se_linux_options=None, seccomp_profile=None, windows_options=None, local_vars_configuration=None):  # noqa: E501
        """V1SecurityContext - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._allow_privilege_escalation = None
        self._capabilities = None
        self._privileged = None
        self._proc_mount = None
        self._read_only_root_filesystem = None
        self._run_as_group = None
        self._run_as_user = None
        self._run_as_non_root = None
        self._se_linux_options = None
        self._seccomp_profile = None
        self._windows_options = None
        self.discriminator = None
        # Only assign arguments that were supplied; unset optional fields
        # stay None and are still emitted by to_dict().
        if allow_privilege_escalation is not None:
            self.allow_privilege_escalation = allow_privilege_escalation
        if capabilities is not None:
            self.capabilities = capabilities
        if privileged is not None:
            self.privileged = privileged
        if proc_mount is not None:
            self.proc_mount = proc_mount
        if read_only_root_filesystem is not None:
            self.read_only_root_filesystem = read_only_root_filesystem
        if run_as_group is not None:
            self.run_as_group = run_as_group
        if run_as_non_root is not None:
            self.run_as_non_root = run_as_non_root
        if run_as_user is not None:
            self.run_as_user = run_as_user
        if se_linux_options is not None:
            self.se_linux_options = se_linux_options
        if seccomp_profile is not None:
            self.seccomp_profile = seccomp_profile
        if windows_options is not None:
            self.windows_options = windows_options
    @property
    def allow_privilege_escalation(self):
        """Gets the allow_privilege_escalation of this V1SecurityContext.  # noqa: E501
        AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :return: The allow_privilege_escalation of this V1SecurityContext.  # noqa: E501
        :rtype: bool
        """
        return self._allow_privilege_escalation
    @allow_privilege_escalation.setter
    def allow_privilege_escalation(self, allow_privilege_escalation):
        """Sets the allow_privilege_escalation of this V1SecurityContext.
        AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :param allow_privilege_escalation: The allow_privilege_escalation of this V1SecurityContext.  # noqa: E501
        :type: bool
        """
        self._allow_privilege_escalation = allow_privilege_escalation
    @property
    def capabilities(self):
        """Gets the capabilities of this V1SecurityContext.  # noqa: E501
        :return: The capabilities of this V1SecurityContext.  # noqa: E501
        :rtype: V1Capabilities
        """
        return self._capabilities
    @capabilities.setter
    def capabilities(self, capabilities):
        """Sets the capabilities of this V1SecurityContext.
        :param capabilities: The capabilities of this V1SecurityContext.  # noqa: E501
        :type: V1Capabilities
        """
        self._capabilities = capabilities
    @property
    def privileged(self):
        """Gets the privileged of this V1SecurityContext.  # noqa: E501
        Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :return: The privileged of this V1SecurityContext.  # noqa: E501
        :rtype: bool
        """
        return self._privileged
    @privileged.setter
    def privileged(self, privileged):
        """Sets the privileged of this V1SecurityContext.
        Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :param privileged: The privileged of this V1SecurityContext.  # noqa: E501
        :type: bool
        """
        self._privileged = privileged
    @property
    def proc_mount(self):
        """Gets the proc_mount of this V1SecurityContext.  # noqa: E501
        procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :return: The proc_mount of this V1SecurityContext.  # noqa: E501
        :rtype: str
        """
        return self._proc_mount
    @proc_mount.setter
    def proc_mount(self, proc_mount):
        """Sets the proc_mount of this V1SecurityContext.
        procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :param proc_mount: The proc_mount of this V1SecurityContext.  # noqa: E501
        :type: str
        """
        self._proc_mount = proc_mount
    @property
    def read_only_root_filesystem(self):
        """Gets the read_only_root_filesystem of this V1SecurityContext.  # noqa: E501
        Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :return: The read_only_root_filesystem of this V1SecurityContext.  # noqa: E501
        :rtype: bool
        """
        return self._read_only_root_filesystem
    @read_only_root_filesystem.setter
    def read_only_root_filesystem(self, read_only_root_filesystem):
        """Sets the read_only_root_filesystem of this V1SecurityContext.
        Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :param read_only_root_filesystem: The read_only_root_filesystem of this V1SecurityContext.  # noqa: E501
        :type: bool
        """
        self._read_only_root_filesystem = read_only_root_filesystem
    @property
    def run_as_group(self):
        """Gets the run_as_group of this V1SecurityContext.  # noqa: E501
        The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :return: The run_as_group of this V1SecurityContext.  # noqa: E501
        :rtype: int
        """
        return self._run_as_group
    @run_as_group.setter
    def run_as_group(self, run_as_group):
        """Sets the run_as_group of this V1SecurityContext.
        The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :param run_as_group: The run_as_group of this V1SecurityContext.  # noqa: E501
        :type: int
        """
        self._run_as_group = run_as_group
    @property
    def run_as_non_root(self):
        """Gets the run_as_non_root of this V1SecurityContext.  # noqa: E501
        Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.  # noqa: E501
        :return: The run_as_non_root of this V1SecurityContext.  # noqa: E501
        :rtype: bool
        """
        return self._run_as_non_root
    @run_as_non_root.setter
    def run_as_non_root(self, run_as_non_root):
        """Sets the run_as_non_root of this V1SecurityContext.
        Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.  # noqa: E501
        :param run_as_non_root: The run_as_non_root of this V1SecurityContext.  # noqa: E501
        :type: bool
        """
        self._run_as_non_root = run_as_non_root
    @property
    def run_as_user(self):
        """Gets the run_as_user of this V1SecurityContext.  # noqa: E501
        The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :return: The run_as_user of this V1SecurityContext.  # noqa: E501
        :rtype: int
        """
        return self._run_as_user
    @run_as_user.setter
    def run_as_user(self, run_as_user):
        """Sets the run_as_user of this V1SecurityContext.
        The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.  # noqa: E501
        :param run_as_user: The run_as_user of this V1SecurityContext.  # noqa: E501
        :type: int
        """
        self._run_as_user = run_as_user
    @property
    def se_linux_options(self):
        """Gets the se_linux_options of this V1SecurityContext.  # noqa: E501
        :return: The se_linux_options of this V1SecurityContext.  # noqa: E501
        :rtype: V1SELinuxOptions
        """
        return self._se_linux_options
    @se_linux_options.setter
    def se_linux_options(self, se_linux_options):
        """Sets the se_linux_options of this V1SecurityContext.
        :param se_linux_options: The se_linux_options of this V1SecurityContext.  # noqa: E501
        :type: V1SELinuxOptions
        """
        self._se_linux_options = se_linux_options
    @property
    def seccomp_profile(self):
        """Gets the seccomp_profile of this V1SecurityContext.  # noqa: E501
        :return: The seccomp_profile of this V1SecurityContext.  # noqa: E501
        :rtype: V1SeccompProfile
        """
        return self._seccomp_profile
    @seccomp_profile.setter
    def seccomp_profile(self, seccomp_profile):
        """Sets the seccomp_profile of this V1SecurityContext.
        :param seccomp_profile: The seccomp_profile of this V1SecurityContext.  # noqa: E501
        :type: V1SeccompProfile
        """
        self._seccomp_profile = seccomp_profile
    @property
    def windows_options(self):
        """Gets the windows_options of this V1SecurityContext.  # noqa: E501
        :return: The windows_options of this V1SecurityContext.  # noqa: E501
        :rtype: V1WindowsSecurityContextOptions
        """
        return self._windows_options
    @windows_options.setter
    def windows_options(self, windows_options):
        """Sets the windows_options of this V1SecurityContext.
        :param windows_options: The windows_options of this V1SecurityContext.  # noqa: E501
        :type: V1WindowsSecurityContextOptions
        """
        self._windows_options = windows_options
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: compares serialized dicts, not identity.
        if not isinstance(other, V1SecurityContext):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1SecurityContext):
            return True
        return self.to_dict() != other.to_dict()
| {
"content_hash": "07d516c64cc878addd161f97c87d9787",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 435,
"avg_line_length": 41.67602040816327,
"alnum_prop": 0.6594233947481177,
"repo_name": "kubernetes-client/python",
"id": "161787ca4eb99fd814dce1527c45ede3dcb43b33",
"size": "16354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_security_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
} |
import unittest
class TestLogSettingUser(unittest.TestCase):
    """Placeholder test case for the Log Setting User doctype (no tests yet)."""
    pass
| {
"content_hash": "6a82c9e30dcbadf26ebb65110d7944ff",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 44,
"avg_line_length": 17,
"alnum_prop": 0.8382352941176471,
"repo_name": "mhbu50/frappe",
"id": "45f7b61158757ad36359064fed086c8a19f48457",
"size": "195",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/log_setting_user/test_log_setting_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "247122"
},
{
"name": "JavaScript",
"bytes": "2359670"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3464477"
},
{
"name": "SCSS",
"bytes": "248877"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
} |
import os
import mimetypes
from hashlib import md5
from django.db import models
from instance import Instance
def upload_to(instance, filename):
    """Return the storage path for an attachment: <username>/attachments/<basename>."""
    basename = os.path.split(filename)[1]
    username = instance.instance.xform.user.username
    return os.path.join(username, 'attachments', basename)
def hash_attachment_contents(contents):
    """Return the MD5 hex digest of *contents* as a unicode string."""
    digest = md5(contents).hexdigest()
    return u'%s' % digest
def generate_attachment_filename(instance, filename):
    """Build the attachment path: <user>/attachments/<form uuid>/<instance uuid>/<basename>.

    Falls back to the literal 'form' / 'instance' segments when the
    corresponding uuid is falsy.
    """
    xform = instance.xform
    parts = [
        xform.user.username,
        'attachments',
        xform.uuid or 'form',
        instance.uuid or 'instance',
        os.path.split(filename)[1],
    ]
    return os.path.join(*parts)
class Attachment(models.Model):
    # The submission (Instance) this media file was uploaded with.
    instance = models.ForeignKey(Instance, related_name="attachments")
    # Stored under <username>/attachments/<basename>; see upload_to above.
    media_file = models.FileField(upload_to=upload_to)
    # Guessed from the file name on first save; empty string when unknown.
    mimetype = models.CharField(
        max_length=50, null=False, blank=True, default='')
    class Meta:
        app_label = 'logger'
    def save(self, *args, **kwargs):
        # Fill in the mimetype lazily, only when it has not been set yet.
        if self.media_file and self.mimetype == '':
            # guess mimetype
            mimetype, encoding = mimetypes.guess_type(self.media_file.name)
            if mimetype:
                self.mimetype = mimetype
        super(Attachment, self).save(*args, **kwargs)
    @property
    def file_hash(self):
        # MD5 hex digest of the file contents, or '' when the backing file
        # is missing from storage. NOTE(review): reads the whole file into
        # memory and leaves the file position at EOF.
        if self.media_file.storage.exists(self.media_file.name):
            return u'%s' % md5(self.media_file.read()).hexdigest()
        return u''
    @property
    def filename(self):
        # Base name of the stored file, without any directory components.
        return os.path.basename(self.media_file.name)
| {
"content_hash": "e6212ae59695ef1228ab0a491f94ba94",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 75,
"avg_line_length": 28.24074074074074,
"alnum_prop": 0.640655737704918,
"repo_name": "awemulya/fieldsight-kobocat",
"id": "021170f7081a861cdfdba52f1f7372a221262eba",
"size": "1525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onadata/apps/logger/models/attachment.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "Dockerfile",
"bytes": "2462"
},
{
"name": "HTML",
"bytes": "1488442"
},
{
"name": "JavaScript",
"bytes": "674757"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "5340355"
},
{
"name": "Shell",
"bytes": "16493"
}
],
"symlink_target": ""
} |
"""Implements authentication based on signed GCE VM metadata tokens.
See https://cloud.google.com/compute/docs/instances/verifying-instance-identity.
JWTs with signed metadata are read from X-Luci-Gce-Vm-Token header. The 'aud'
field in the tokens is expected to be https://(.*-dot-)?<app-id>.appspot.com.
On successful validation, the bot is authenticated as
bot:<instance-name>@gce.<project>[.<realm>]
Additional details are then available via get_auth_details():
gce_instance - an instance name extracted from the token, as is.
gce_project - a project name extracted from the token, as is.
Prefer to use get_auth_details() for authorization checks instead of parsing
"bot:..." identifier, since there's less chance of a mistake that way.
"""
import logging
import re
from . import api
from . import model
from . import signature
from . import tokens
from components import utils
from google.appengine.api import app_identity
# Part of public API of 'auth' component, exposed by this module.
__all__ = [
'gce_vm_authentication',
'optional_gce_vm_authentication',
]
# HTTP header that carries the GCE VM token.
GCE_VM_TOKEN_HEADER = 'X-Luci-Gce-Vm-Token'
# Raised by gce_vm_authentication below for missing fields, bad audience,
# bad signature or expired tokens.
class BadTokenError(api.AuthenticationError):
  """Raised if the supplied GCE VM token is not valid."""
def gce_vm_authentication(request):
  """Reads and validates X-Luci-Gce-Vm-Token header, if present.
  See components.auth.handler.AuthenticatingHandler.get_auth_methods for details
  of the expected interface.
  Args:
    request: webapp2.Request with the incoming request.
  Returns:
    (auth.Identity, AuthDetails) on success.
    (None, None) if there's no VM token header (which means this authentication
    method is not applicable).
  Raises:
    BadTokenError (which is api.AuthenticationError) if VM token header is
    present, but the token is invalid.
    CertificateError on transient errors when fetching google certs.
  """
  token = request.headers.get(GCE_VM_TOKEN_HEADER)
  if not token:
    # No header at all => this auth method does not apply to the request.
    return None, None
  # Fetch (most likely already cached) Google OAuth2 certs.
  certs = signature.get_google_oauth2_certs()
  # Make sure the JWT is signed by Google, and not yet expired.
  try:
    _, payload = tokens.verify_jwt(token, certs)
  except (signature.CertificateError, tokens.InvalidTokenError) as exc:
    raise BadTokenError('Invalid GCE VM token: %s' % exc)
  # The valid payload looks like this:
  # {
  #   "iss": "[TOKEN_ISSUER]",
  #   "iat": [ISSUED_TIME],
  #   "exp": [EXPIRED_TIME],
  #   "aud": "[AUDIENCE]",
  #   "sub": "[SUBJECT]",
  #   "azp": "[AUTHORIZED_PARTY]",
  #   "google": {
  #     "compute_engine": {
  #       "project_id": "[PROJECT_ID]",
  #       "project_number": [PROJECT_NUMBER],
  #       "zone": "[ZONE]",
  #       "instance_id": [INSTANCE_ID],
  #       "instance_name": "[INSTANCE_NAME]"
  #       "instance_creation_timestamp": [CREATION_TIMESTAMP]
  #     }
  #   }
  # }
  # Verify the token was intended for us.
  allowed = _allowed_audience_re()
  aud = str(payload.get('aud', ''))
  if not allowed.match(aud):
    raise BadTokenError(
        'Bad audience in GCE VM token: got %r, expecting %r' %
        (aud, allowed.pattern))
  # The token should have 'google.compute_engine' field, which happens only if
  # it was generated with format=full.
  gce = payload.get('google', {}).get('compute_engine')
  if not gce:
    raise BadTokenError(
        'No google.compute_engine in the GCE VM token, use "full" format')
  if not isinstance(gce, dict):
    raise BadTokenError('Wrong type for compute_engine: %r' % (gce,))
  instance_name = gce.get('instance_name')
  # basestring: this codebase still targets Python 2.
  if not isinstance(instance_name, basestring):
    raise BadTokenError('Wrong type for instance_name: %r' % (instance_name,))
  project_id = gce.get('project_id')
  if not isinstance(project_id, basestring):
    raise BadTokenError('Wrong type for project_id: %r' % (project_id,))
  details = api.new_auth_details(
      gce_instance=str(instance_name),
      gce_project=str(project_id))
  # Convert '<realm>:<project>' to '<project>.<realm>' for bot:... string.
  domain = details.gce_project
  if ':' in domain:
    realm, proj = domain.split(':', 1)
    domain = '%s.%s' % (proj, realm)
  # The token is valid. Construct and validate bot identity.
  try:
    ident = model.Identity(
        model.IDENTITY_BOT, '%s@gce.%s' % (details.gce_instance, domain))
  except ValueError as exc:
    raise BadTokenError(str(exc))
  return ident, details
def optional_gce_vm_authentication(request):
  """Like gce_vm_authentication, but treats an invalid token as no token.

  Useful during development and initial roll out, when GCE VM tokens may not
  be working yet: a bad token is logged and the method reports "not
  applicable" instead of rejecting the request.
  """
  try:
    return gce_vm_authentication(request)
  except BadTokenError as exc:
    logging.error('Skipping GCE VM auth, it returned an error: %s', exc)
  return None, None
@utils.cache
def _allowed_audience_re():
  """Returns a regular expression for allowed 'aud' field."""
  # Cached for the process lifetime; the app's default version hostname is
  # fixed while the process runs.
  return _audience_re(app_identity.get_default_version_hostname())
# Extracted into a separate function for simpler testing.
def _audience_re(hostname):
return re.compile(r'^https\://([a-z0-9\-_]+-dot-)?'+re.escape(hostname)+'$')
| {
"content_hash": "91db0a10141fec95d8c7064a4e7207a3",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 80,
"avg_line_length": 32.22222222222222,
"alnum_prop": 0.6842911877394636,
"repo_name": "luci/luci-py",
"id": "41559ea75c4951226927f04295e8cc68d6073888",
"size": "5394",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "appengine/components/components/auth/gce_vm_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5576"
},
{
"name": "HTML",
"bytes": "1900972"
},
{
"name": "JavaScript",
"bytes": "113046"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "5885612"
},
{
"name": "Shell",
"bytes": "5183"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``flickr_photo_url`` text field to Vouchers."""
    dependencies = [
        ('public_interface', '0005_auto_20210313_1919'),
    ]
    operations = [
        migrations.AddField(
            model_name='vouchers',
            name='flickr_photo_url',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| {
"content_hash": "49f29518cda000978efed683af57ad22",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 22.75,
"alnum_prop": 0.5879120879120879,
"repo_name": "carlosp420/VoSeq",
"id": "f7546fbbf299d0441584fb1dfc1bb398dea73733",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "public_interface/migrations/0006_vouchers_flickr_photo_url.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19319"
},
{
"name": "HTML",
"bytes": "95764"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "357630"
},
{
"name": "Shell",
"bytes": "11587"
}
],
"symlink_target": ""
} |
import uuid
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from codenode.frontend.notebook.models import Notebook
class Folder(models.Model):
    """A bookshelf folder owning a set of notebooks.

    Folders form a tree via ``parent`` (``None`` for root folders) and are
    identified globally by a 32-character hex ``guid`` assigned on first save.
    """
    guid = models.CharField(max_length=32, unique=True, editable=False) #needs to be globally unique
    owner = models.ForeignKey(User)
    parent = models.ForeignKey('Folder', null=True)
    title = models.CharField(max_length=100)
    notebooks = models.ManyToManyField(Notebook, blank=True, related_name='folder_notebooks')
    def save(self, *args, **kwargs):
        # Lazily assign a guid on first save: uuid4 hex without dashes.
        # Accepting *args/**kwargs (backward compatible) lets Django pass
        # options like using=... through to Model.save().
        if not self.guid:
            self.guid = unicode(uuid.uuid4()).replace("-", "")
        super(Folder, self).save(*args, **kwargs)
    class Meta:
        verbose_name = _('Bookshelf Folder')
        verbose_name_plural = _('Bookshelf Folder')
    def __unicode__(self):
        # BUG FIX: the original tested bare ``parent`` (an unbound name),
        # which raised NameError at runtime; it must be ``self.parent``.
        if self.parent is None:
            return u"Root folder '%s' (owner: '%s')" % (self.title, self.owner)
        else:
            return u"Folder '%s' (parent: '%s', owner: '%s')" % (self.title, self.parent.title, self.owner)
| {
"content_hash": "a5150fe72c19d9a6f398213051ed4fe9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 107,
"avg_line_length": 35.733333333333334,
"alnum_prop": 0.6567164179104478,
"repo_name": "regmi/codenode-unr",
"id": "08d720e46d47a56833a0a0a06e1002340b482927",
"size": "1473",
"binary": false,
"copies": "1",
"ref": "refs/heads/femhub",
"path": "codenode/frontend/bookshelf/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "673814"
},
{
"name": "Python",
"bytes": "613824"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.