text stringlengths 4 1.02M | meta dict |
|---|---|
'''Database specifications for an evaluation protocol based on the Iris Flower
databases from Fisher's original work.'''
import numpy
import pkg_resources
# A list of protocols we implement: each protocol maps every subset name to
# the per-class example indices that belong to it.
PROTOCOLS = {
    'proto1': {'train': range(0, 30), 'test': range(30, 50)},
    'proto2': {'train': range(20, 50), 'test': range(0, 20)},
}

# Subsets of the database in each protocol
SUBSETS = [
    'train',
    'test',
]

# The types of Iris flowers in the dataset (class labels, as stored in the
# last column of the CSV file)
CLASSES = [
    'setosa',
    'versicolor',
    'virginica',
]

# The four values that were sampled (CSV columns 0-3, in this order)
VARIABLES = [
    'sepal length',
    'sepal width',
    'petal length',
    'petal width',
]
def load():
    '''Load the raw CSV data into a dictionary mapping each class name to a
    2D numpy array with one row per example and one column per feature.'''
    import csv

    per_class = {name: [] for name in CLASSES}
    csv_path = pkg_resources.resource_filename(__name__, 'data.csv')
    with open(csv_path, 'rt') as f:
        first = True
        for row in csv.reader(f):
            if first:  # skip the header line
                first = False
                continue
            # columns 0-3 are the features, column 4 is the class label
            per_class[row[4]].append(numpy.array([float(v) for v in row[:4]]))
    return {name: numpy.vstack(samples) for name, samples in per_class.items()}
def split_data(data, subset, splits):
    '''Return, for every class in ``data``, only the examples whose indices
    belong to the requested subset of the given protocol split.'''
    indices = splits[subset]
    return {label: samples[indices] for label, samples in data.items()}
def get(protocol, subset, classes=CLASSES, variables=VARIABLES):
    '''Returns the data subset given a particular protocol

    Parameters:

      protocol (string): one of the valid protocols supported by this interface
      subset (string): one of 'train' or 'test'
      classes (list of string): names of the classes whose data you want
      variables (list of string): names of the features you want data from

    Returns:

      data (numpy.ndarray): the data for all the classes and variables nicely
      packed into one numpy 3D array. One depth represents the data for one
      class, one row is one example, one column a given feature.
    '''
    per_class = split_data(load(), subset, PROTOCOLS[protocol])
    # column indices of the requested features, in the requested order
    columns = [VARIABLES.index(name) for name in variables]
    # keep only the requested classes/columns, then stack into one 3D array
    return numpy.array([per_class[name][:, columns] for name in classes])
| {
"content_hash": "e622f014b394b5283927207c974bf992",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 26.311827956989248,
"alnum_prop": 0.6501838986514099,
"repo_name": "anjos/rrpack",
"id": "14490c131edd28562e2834dae4fd1164a5648b3b",
"size": "2557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rr/database.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26405"
}
],
"symlink_target": ""
} |
"""Updates ACL item with specified ID"""
from baseCmd import *
from baseResponse import *
class updateNetworkACLItemCmd (baseCmd):
    """Request object for the asynchronous updateNetworkACLItem API call.

    Each instance attribute is one request parameter; ``typeInfo`` records
    the wire type of every parameter and ``required`` lists the mandatory
    ones (only ``id``).  The bare strings describe each field.
    """
    typeInfo = {}

    def __init__(self):
        self.isAsync = "true"
        """the ID of the network ACL item"""
        """Required"""
        self.id = None
        self.typeInfo['id'] = 'uuid'
        """acl entry action, allow or deny"""
        self.action = None
        self.typeInfo['action'] = 'string'
        """the cidr list to allow traffic from/to"""
        self.cidrlist = []
        self.typeInfo['cidrlist'] = 'list'
        """an optional field, in case you want to set a custom id to the resource. Allowed to Root Admins only"""
        self.customid = None
        self.typeInfo['customid'] = 'string'
        """the ending port of ACL"""
        self.endport = None
        self.typeInfo['endport'] = 'integer'
        """an optional field, whether to the display the rule to the end user or not"""
        self.fordisplay = None
        self.typeInfo['fordisplay'] = 'boolean'
        """error code for this ICMP message"""
        self.icmpcode = None
        self.typeInfo['icmpcode'] = 'integer'
        """type of the ICMP message being sent"""
        self.icmptype = None
        self.typeInfo['icmptype'] = 'integer'
        """The network of the vm the ACL will be created for"""
        self.number = None
        self.typeInfo['number'] = 'integer'
        """the protocol for the ACL rule. Valid values are TCP/UDP/ICMP/ALL or valid protocol number"""
        self.protocol = None
        self.typeInfo['protocol'] = 'string'
        """the starting port of ACL"""
        self.startport = None
        self.typeInfo['startport'] = 'integer'
        """the traffic type for the ACL,can be Ingress or Egress, defaulted to Ingress if not specified"""
        self.traffictype = None
        self.typeInfo['traffictype'] = 'string'
        self.required = ["id", ]
class updateNetworkACLItemResponse (baseResponse):
    """Response object for updateNetworkACLItem; each attribute mirrors one
    field returned by the API (the bare strings describe each field)."""
    typeInfo = {}

    def __init__(self):
        """the ID of the ACL Item"""
        self.id = None
        self.typeInfo['id'] = 'string'
        """the ID of the ACL this item belongs to"""
        self.aclid = None
        self.typeInfo['aclid'] = 'string'
        """Action of ACL Item. Allow/Deny"""
        self.action = None
        self.typeInfo['action'] = 'string'
        """the cidr list to forward traffic from"""
        self.cidrlist = None
        self.typeInfo['cidrlist'] = 'string'
        """the ending port of ACL's port range"""
        self.endport = None
        self.typeInfo['endport'] = 'string'
        """is rule for display to the regular user"""
        self.fordisplay = None
        self.typeInfo['fordisplay'] = 'boolean'
        """error code for this icmp message"""
        self.icmpcode = None
        self.typeInfo['icmpcode'] = 'integer'
        """type of the icmp message being sent"""
        self.icmptype = None
        self.typeInfo['icmptype'] = 'integer'
        """Number of the ACL Item"""
        self.number = None
        self.typeInfo['number'] = 'integer'
        """the protocol of the ACL"""
        self.protocol = None
        self.typeInfo['protocol'] = 'string'
        """the starting port of ACL's port range"""
        self.startport = None
        self.typeInfo['startport'] = 'string'
        """the state of the rule"""
        self.state = None
        self.typeInfo['state'] = 'string'
        """the traffic type for the ACL"""
        self.traffictype = None
        self.typeInfo['traffictype'] = 'string'
        """the list of resource tags associated with the network ACLs"""
        self.tags = []

    class tags:
        """One resource tag attached to the ACL item."""
        def __init__(self):
            """"the account associated with the tag"""
            self.account = None
            """"customer associated with the tag"""
            self.customer = None
            """"the domain associated with the tag"""
            self.domain = None
            """"the ID of the domain associated with the tag"""
            self.domainid = None
            """"tag key name"""
            self.key = None
            """"the project name where tag belongs to"""
            self.project = None
            """"the project id the tag belongs to"""
            self.projectid = None
            """"id of the resource"""
            self.resourceid = None
            """"resource type"""
            self.resourcetype = None
            """"tag value"""
            self.value = None
| {
"content_hash": "2bf1d4f44434ecf9ce73508f227f0b8e",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 113,
"avg_line_length": 37.09243697478992,
"alnum_prop": 0.5713638423198912,
"repo_name": "MissionCriticalCloud/marvin",
"id": "e53f7c489086eb8dd886bc6d694f00e4cc46742b",
"size": "4414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marvin/cloudstackAPI/updateNetworkACLItem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2573421"
}
],
"symlink_target": ""
} |
"""
Management class for live migration VM operations.
"""
import functools
from oslo.config import cfg
from oslo.utils import excutils
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
def check_os_version_requirement(function):
    """Decorator rejecting live-migration operations on hosts that lack the
    live-migration utils (anything older than Hyper-V Server 2012)."""
    @functools.wraps(function)
    def _checked(self, *args, **kwargs):
        if self._livemigrutils:
            return function(self, *args, **kwargs)
        raise NotImplementedError(_('Live migration is supported '
                                    'starting with Hyper-V Server '
                                    '2012'))
    return _checked
class LiveMigrationOps(object):
    """Hyper-V live-migration operations for the Nova Hyper-V driver.

    Every public method is guarded by @check_os_version_requirement, which
    raises NotImplementedError when the host lacks live-migration utils.
    """

    def __init__(self):
        # Live migration is supported starting from Hyper-V Server 2012
        if utilsfactory.get_hostutils().check_min_windows_version(6, 2):
            self._livemigrutils = utilsfactory.get_livemigrationutils()
        else:
            self._livemigrutils = None
        self._pathutils = utilsfactory.get_pathutils()
        self._vmops = vmops.VMOps()
        self._volumeops = volumeops.VolumeOps()
        self._imagecache = imagecache.ImageCache()

    @check_os_version_requirement
    def live_migration(self, context, instance_ref, dest, post_method,
                       recover_method, block_migration=False,
                       migrate_data=None):
        """Migrate a running instance to `dest`, calling `post_method` on
        success or `recover_method` before re-raising on failure."""
        LOG.debug("live_migration called", instance=instance_ref)
        instance_name = instance_ref["name"]
        try:
            # Ship the console logs first so they survive the move.
            self._vmops.copy_vm_console_logs(instance_name, dest)
            self._livemigrutils.live_migrate_vm(instance_name,
                                                dest)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug("Calling live migration recover_method "
                          "for instance: %s", instance_name)
                recover_method(context, instance_ref, dest, block_migration)
        LOG.debug("Calling live migration post_method for instance: %s",
                  instance_name)
        post_method(context, instance_ref, dest, block_migration)

    @check_os_version_requirement
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info):
        """Prepare the destination: validate live-migration config, warm the
        image cache (CoW images not booting from volume) and attach volumes."""
        LOG.debug("pre_live_migration called", instance=instance)
        self._livemigrutils.check_live_migration_config()
        if CONF.use_cow_images:
            boot_from_volume = self._volumeops.ebs_root_in_block_devices(
                block_device_info)
            if not boot_from_volume and instance.image_ref:
                self._imagecache.get_cached_image(context, instance)
        self._volumeops.initialize_volumes_connection(block_device_info)

    @check_os_version_requirement
    def post_live_migration(self, context, instance, block_device_info):
        """Tear down volume connections on the source after a migration."""
        self._volumeops.disconnect_volumes(block_device_info)

    @check_os_version_requirement
    def post_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info, block_migration):
        """Resume serial-console logging for the instance on the destination."""
        LOG.debug("post_live_migration_at_destination called",
                  instance=instance_ref)
        self._vmops.log_vm_serial_output(instance_ref['name'],
                                         instance_ref['uuid'])

    @check_os_version_requirement
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """No destination-side restrictions; return an empty check payload."""
        # Bugfix: instance_ref was passed positionally, which the logger
        # treats as a %-format argument for a message with no placeholder;
        # it must be the `instance` keyword, as in the other methods.
        LOG.debug("check_can_live_migrate_destination called",
                  instance=instance_ref)
        return {}

    @check_os_version_requirement
    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        """Nothing to clean up on the destination."""
        LOG.debug("check_can_live_migrate_destination_cleanup called")

    @check_os_version_requirement
    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        """No source-side restrictions; pass the destination data through."""
        # Bugfix: same positional-logging issue fixed here as well.
        LOG.debug("check_can_live_migrate_source called",
                  instance=instance_ref)
        return dest_check_data
| {
"content_hash": "56b5bdf2588793febd704667a8d02303",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 41.11009174311926,
"alnum_prop": 0.6065610354831511,
"repo_name": "mgagne/nova",
"id": "72fef72e9620652b84c4a1d85a185c88421feb0a",
"size": "5120",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/virt/hyperv/livemigrationops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15421976"
},
{
"name": "Shell",
"bytes": "21612"
}
],
"symlink_target": ""
} |
"""Run all tutorials successively."""
from galry import run_all_scripts
run_all_scripts()
| {
"content_hash": "a7d0b80e3d23154d01f30f6cc6c71015",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 37,
"avg_line_length": 30,
"alnum_prop": 0.7555555555555555,
"repo_name": "rossant/galry",
"id": "cf4ccdb0eb364647ad969918a0741f0236ed0e45",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/_run.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "24569"
},
{
"name": "Python",
"bytes": "397431"
},
{
"name": "Shell",
"bytes": "57"
}
],
"symlink_target": ""
} |
from evennia import create_object, search_object, logger
from random import randint
def roll_hit():
    """Roll 1d10 to determine whether an attack hits.

    NOTE(review): the docstring previously claimed "Roll 1d100", but the
    code has always rolled randint(1, 10); the documentation is fixed to
    match the behavior that skill_combat() depends on.
    """
    return randint(1, 10)
def roll_dmg():
    """Roll a six-sided damage die (1d6)."""
    sides = 6
    return randint(1, sides)
def check_defeat(character):
    "Checks if a character is defeated."
    # A character is defeated when HP has dropped to zero or below.  On
    # defeat, HP is refilled (from db.full_HP when set, else to 100).  For
    # enemies -- objects exposing a set_dead() hook -- the death message is
    # broadcast, the carried robot-log object is dropped into the room, and
    # set_dead() decides the return value.  Ordinary characters fall through
    # and implicitly return None.
    if character.db.HP <= 0:
        character.msg("You fall down, defeated!")
        # restore HP so the character can fight again
        if character.db.full_HP:
            character.db.HP = character.db.full_HP
        else:
            character.db.HP = 100  # reset
        # call enemy hook
        if hasattr(character, "set_dead"):
            character.location.msg_contents(character.db.death_msg)
            # find robot log object
            objs = [obj for obj in character.contents_get() if obj.name == character.db.log]
            if not objs:
                # NOTE(review): returning None here means set_dead() is never
                # called when the log object is missing -- confirm intended.
                return
            else:
                obj = objs[0]
            print obj.name  # NOTE(review): leftover Python 2 debug print
            # drop robot log in room
            obj.move_to(character.location, quiet=True)
            character.location.msg_contents("DATA,obj_add," + obj.name + obj.dbref)
            character.location.msg_contents("The %s falls to the ground." % obj.name, exclude=character)
            # call the object script's at_drop() method.
            obj.at_drop(character)
            # should return True if target is defeated, False otherwise.
            return character.set_dead()
def add_XP(character, amount):
    """Award experience points and level up when the new XP total reaches
    the next level's threshold of (level + 1) ** 2."""
    db = character.db
    db.XP += amount
    character.msg("DATA,xp,%i" % db.XP)
    next_level = db.level + 1
    if db.XP >= next_level ** 2:
        # level up: +1 strength, +2 combat skill
        db.level = next_level
        db.STR += 1
        db.combat += 2
        character.msg("You're now level %i!" % db.level)
        character.msg("DATA,level,%i" % db.level)
def skill_combat(*args):
    """
    This determines outcome of combat. The one who
    rolls under their combat skill AND higher than
    their opponent's roll hits.

    args is expected to be the two combatants, attacker first.
    """
    char1, char2 = args
    roll1, roll2 = roll_hit(), roll_hit()
    print roll1, roll2, char1.db.combat, char2.db.combat  # NOTE(review): leftover Python 2 debug print
    failtext = "%s hits you! %i damage!"
    wintext = "You hit %s! %i damage!"
    # a successful hit also awards a small random amount of XP
    xp_gain = randint(1, 3)
    # char1 hits when roll1 is within their combat skill AND (doubled) beats
    # char2's roll.  NOTE(review): only roll1 is doubled against roll2 --
    # confirm the asymmetry is intentional.
    if char1.db.combat*2 >= roll1 *2 > roll2:
        # char 1 hits
        dmg = roll_dmg() + char1.db.STR
        char1.msg(wintext % (char2, dmg))
        add_XP(char1, xp_gain)
        char2.msg(failtext % (char1, dmg))
        char2.db.HP -= dmg
        char2.msg("DATA,health,%s" % char2.db.HP)
        check_defeat(char2)
        # NOTE(review): the string below is disabled (commented-out) code
        # for char2's counter-attack; it is a no-op expression.
        """
        if char2.db.combat >= roll2 > roll1:
            # char 2 hits
            dmg = roll_dmg() + char2.db.STR
            char1.msg(failtext % (char2, dmg))
            char1.db.HP -= dmg
            char1.msg("DATA,hp,%s" % char1.db.HP)
            check_defeat(char1)
            char2.msg(wintext % (char1, dmg))
            add_XP(char2, xp_gain)
        """
    else:
        # a draw
        drawtext = "You miss."
        char1.msg(drawtext)
        char2.msg(drawtext)
SKILLS = {"kickbox": skill_combat}
def roll_challenge(character1, character2, skillname):
    """
    Determine the outcome of a skill challenge between
    two characters based on the skillname given.

    Raises:
        RuntimeError: if ``skillname`` is not registered in SKILLS.
    """
    if skillname in SKILLS:
        SKILLS[skillname](character1, character2)
    else:
        # Bugfix: this previously raised the undefined name `RunTimeError`,
        # which surfaced as a NameError instead of the intended error.
        raise RuntimeError("Skillname %s not found." % skillname)
def create_room(room_name, character, location, roomtype_key="Generic", roomtype_value="Generic"):
    """
    Create room(if doesn't exist) based on location metadata,
    attach script to control room state and move player to the room
    """
    matches = search_object(room_name, typeclass='typeclasses.rooms.Room')
    if matches:
        room = matches[0]
    else:
        # no such room yet -- create it
        room = create_object("typeclasses.rooms.Room", key=room_name)
        logger.log_info("Room %s Created" % room)
    # (re)configure the room type when it is a new room or the type changed
    type_changed = (room.db.roomtype_key != roomtype_key
                    or room.db.roomtype_value != roomtype_value)
    if type_changed:
        room.db.roomtype_key = roomtype_key
        room.db.roomtype_value = roomtype_value
        if roomtype_key != 'building':
            # outdoors: weather plus outdoor room-state control
            room.scripts.add("typeclasses.scripts.Weather")
            room.scripts.add("typeclasses.scripts.Outdoors")
        if roomtype_value == 'library':
            room.scripts.add('typeclasses.scripts.Library')
        logger.log_info("Room Type Updated to %s: %s" % (room.db.roomtype_key, room.db.roomtype_value))
    if not room.db.location:
        room.db.location = location
    # teleport character to room, if not already in room
    if character.location.name != room_name:
        character.move_to(room, quiet=True)
        logger.log_info("User entered %s" % room_name)
        character.msg("You've entered %s" % room_name)
        character.db.just_entered = False
    elif character.db.just_entered:
        character.msg("You're in %s" % room_name)
        character.db.just_entered = False
| {
"content_hash": "4a22b10d25b7189e6aa74df1475f6b71",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 113,
"avg_line_length": 35.625850340136054,
"alnum_prop": 0.595760931831201,
"repo_name": "debanjum/ArMUD",
"id": "86a4ce395f7c4c76b3bae190abd150807e2d584c",
"size": "5237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ArMUD/world/rules.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "115439"
},
{
"name": "Python",
"bytes": "115152"
}
],
"symlink_target": ""
} |
import microcircuit.constants as const
from microcircuit.viz import show
from microcircuit.dataset.testconnectome001 import testconnectome
# Per-synapse-type scatter styling passed to show(): chemical synapses as
# larger red circles, electrical synapses as blue triangles.
para = {
    const.CONNECTOME_CHEMICAL_SYNAPSE: {
        'marker': 'o',
        'c': 'r',
        's': 50
    },
    const.CONNECTOME_ELECTRICAL_SYNAPSE: {
        'marker': '^',
        'c': 'b',
        's': 40
    }
}

# Visualize the test connectome, ordering skeletons as the graph returns
# them and drawing node labels.
nodes = testconnectome.graph.nodes()
b = show(testconnectome, skeleton_order=nodes, use_label=True, display_parameters=para)
| {
"content_hash": "da30666c8105fc60b9c18414435c217e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 85,
"avg_line_length": 26.944444444444443,
"alnum_prop": 0.6412371134020619,
"repo_name": "unidesigner/microcircuit",
"id": "8fb33b0bddd3ac8f48ba02627fe6b3c798ba9471",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/examples/showmatrix3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "67533"
}
],
"symlink_target": ""
} |
import copy
from django.contrib.sites.models import Site
from cms.api import create_page
from cms.models import Page
from cms.test_utils.testcases import URL_CMS_PAGE, CMSTestCase
from cms.utils.compat import DJANGO_2_2, DJANGO_3_0, DJANGO_3_1
from cms.utils.conf import get_cms_setting
from cms.utils.urlutils import admin_reverse
class SiteTestCase(CMSTestCase):
    """Site framework specific test cases.

    All stuff which is changing settings.SITE_ID for tests should come here.
    """
    def setUp(self):
        """Create two extra Site objects and log a superuser in."""
        self.assertEqual(Site.objects.all().count(), 1)
        with self.settings(SITE_ID=1):
            u = self._create_user("test", True, True)

            # setup sites
            self.site2 = Site.objects.create(domain="sample2.com", name="sample2.com", pk=2)
            self.site3 = Site.objects.create(domain="sample3.com", name="sample3.com", pk=3)

        self._login_context = self.login_user_context(u)
        self._login_context.__enter__()

    def tearDown(self):
        # release the login context entered manually in setUp
        self._login_context.__exit__(None, None, None)

    def test_site_framework(self):
        """Pages created for one site are only counted on that site."""
        # Test the site framework, and test if it's possible to disable it
        with self.settings(SITE_ID=self.site2.pk):
            create_page("page_2a", "nav_playground.html", "de", site=self.site2)

            # the changelist can still be filtered by another site
            response = self.client.get("%s?site__exact=%s" % (URL_CMS_PAGE, self.site3.pk))
            self.assertEqual(response.status_code, 200)
            create_page("page_3b", "nav_playground.html", "de", site=self.site3)

        with self.settings(SITE_ID=self.site3.pk):
            create_page("page_3a", "nav_playground.html", "nl", site=self.site3)

            # with param
            self.assertEqual(Page.objects.on_site(self.site2.pk).count(), 1)
            self.assertEqual(Page.objects.on_site(self.site3.pk).count(), 2)

            self.assertEqual(Page.objects.drafts().on_site().count(), 2)

        with self.settings(SITE_ID=self.site2.pk):
            # without param
            self.assertEqual(Page.objects.drafts().on_site().count(), 1)

    def test_site_preview(self):
        """Previewing a page of another site redirects to that site's domain."""
        page = create_page("page", "nav_playground.html", "de", site=self.site2, published=True)
        page_edit_url_on = self.get_edit_on_url(page.get_absolute_url('de'))
        with self.login_user_context(self.get_superuser()):
            # set the current site on changelist
            response = self.client.post(admin_reverse('cms_page_changelist'), {'site': self.site2.pk})
            self.assertEqual(response.status_code, 200)
            # simulate user clicks on preview icon
            response = self.client.get(admin_reverse('cms_page_preview_page', args=[page.pk, 'de']))
            self.assertEqual(response.status_code, 302)
            if DJANGO_2_2 or DJANGO_3_0 or DJANGO_3_1:
                self.assertEqual(response._headers['location'][1], 'http://sample2.com{}&language=de'.format(page_edit_url_on))
            else:
                # for django3.2 and above. response.headers replace response._headers in earlier versions of django
                self.assertEqual(response.headers['Location'], 'http://sample2.com{}&language=de'.format(page_edit_url_on))

    def test_site_publish(self):
        """Publishing page trees on two sites keeps both trees reachable."""
        self._login_context.__exit__(None, None, None)
        # five page slots per site: one root, two children, two grandchildren
        pages = {"2": list(range(0, 5)), "3": list(range(0, 5))}
        lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
        lang_settings[3][1]['public'] = True
        with self.settings(CMS_LANGUAGES=lang_settings, LANGUAGE_CODE="de"):
            with self.settings(SITE_ID=self.site2.pk):
                pages["2"][0] = create_page("page_2", "nav_playground.html", "de",
                                            site=self.site2, published=True)
                pages["2"][1] = create_page("page_2_1", "nav_playground.html", "de",
                                            parent=pages["2"][0], site=self.site2, published=True)
                pages["2"][2] = create_page("page_2_2", "nav_playground.html", "de",
                                            parent=pages["2"][0], site=self.site2, published=True)
                pages["2"][3] = create_page("page_2_1_1", "nav_playground.html", "de",
                                            parent=pages["2"][1], site=self.site2, published=True)
                pages["2"][4] = create_page("page_2_1_2", "nav_playground.html", "de",
                                            parent=pages["2"][1], site=self.site2, published=True)

                for page in pages["2"]:
                    page_url = page.get_absolute_url(language='de')
                    response = self.client.get(page_url)
                    self.assertEqual(response.status_code, 200)

            with self.settings(SITE_ID=self.site3.pk):
                pages["3"][0] = create_page("page_3", "nav_playground.html", "de",
                                            site=self.site3)
                pages["3"][0].publish('de')
                pages["3"][1] = create_page("page_3_1", "nav_playground.html", "de",
                                            parent=pages["3"][0], site=self.site3, published=True)
                pages["3"][2] = create_page("page_3_2", "nav_playground.html", "de",
                                            parent=pages["3"][0], site=self.site3, published=True)
                pages["3"][3] = create_page("page_3_1_1", "nav_playground.html", "de",
                                            parent=pages["3"][1], site=self.site3, published=True)
                pages["3"][4] = create_page("page_3_1_2", "nav_playground.html", "de",
                                            parent=pages["3"][1], site=self.site3, published=True)

                for page in pages["3"]:
                    # the home page is served at the language root
                    if page.is_home:
                        page_url = "/de/"
                    else:
                        page_url = page.get_absolute_url(language='de')
                    response = self.client.get(page_url)
                    self.assertEqual(response.status_code, 200)
class TestSiteBoundStaticPlaceholder(SiteTestCase):
    """Tests for static placeholders bound to a specific site."""

    def setUp(self):
        super().setUp()
        # render once with a template containing a site-bound static
        # placeholder so the page exists for the actual test
        with self.settings(
            CMS_TEMPLATES=(('placeholder_tests/static_with_site.html', 'tpl'), ),
        ):
            self.test_page = create_page('page', 'placeholder_tests/static_with_site.html', language='de')

    def tearDown(self):
        self.test_page.delete()
        super().tearDown()

    def test_create_site_specific_placeholder(self):
        """Requesting the page in structure mode must succeed."""
        response = self.client.get(self.test_page.get_absolute_url(language='de') + '?structure')
        self.assertEqual(response.status_code, 200)
| {
"content_hash": "5e510be198544c9aca5fcf161566a7fb",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 127,
"avg_line_length": 49.76119402985075,
"alnum_prop": 0.5625374925014996,
"repo_name": "datakortet/django-cms",
"id": "c869335df756b7f910052debd194571f907f6320",
"size": "6668",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "cms/tests/test_site.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "203975"
},
{
"name": "JavaScript",
"bytes": "1249081"
},
{
"name": "Python",
"bytes": "2374270"
},
{
"name": "SCSS",
"bytes": "137720"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
} |
from devinclude import *
from bustime.models import *
from bustime.views import ajax_stop_id_f
from bustime.views import bus_last_f
import ujson
import time
# import six
# from twisted.python import log
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.endpoints import serverFromString
from autobahn.wamp import types
from autobahn.twisted import wamp, websocket
# from twisted.internet.endpoints import clientFromString
import cPickle as pickle
import zmq
# import cProfile
# import logging
# LOGGER = logging.getLogger(__name__)
# TCP port the WAMP-over-WebSocket endpoint listens on.
PORT = 9002

# ZeroMQ PUB socket used by the RPC handlers to broadcast bus updates.
# NOTE(review): ZSUB presumably comes from the star imports above
# (devinclude/bustime.models) -- confirm.
context = zmq.Context()
sock = context.socket(zmq.PUB)
sock.connect(ZSUB)
#
# --------------------- RPCs -------------------------------
#
def rpc_bdata(bus_id, mode, mobile):
    """Return the latest data for one bus.

    mode 0: load the bus and serialize its last state directly.
    mode 1: read a pre-pickled payload from Redis (empty dict if missing).
    Any other mode yields an empty dict.
    """
    if mode == 0:
        bus = bus_get(int(bus_id))
        return bus_last_f(bus, raw=True, mobile=bool(mobile))
    if mode == 1:
        time.sleep(0.25)  # dirty hack to avoid loading data before bus stops
        cached = REDIS.get("bdata_mode1_%s" % bus_id)
        if cached:
            return pickle.loads(cached)
        return {}
    return {}
def rpc_bootstrap_amounts(city_id):
    """Return the cached per-city bus-amount counters."""
    return {"busamounts": cache.get("busamounts_%s" % city_id)}
def rpc_passenger(what, bus_id, r_id):
    """Adjust the passenger counter for route ``r_id`` on bus ``bus_id``
    (increment when ``what`` > 0, else decrement but never below zero),
    persist it in the cache for an hour and publish it over ZeroMQ."""
    route = int(r_id)
    cache_key = 'bustime_passenger_%s' % bus_id
    counters = cache.get(cache_key, {})
    current = counters.get(route, 0)
    if what > 0:
        current += 1
    elif current > 0:
        current -= 1
    counters[route] = current
    cache.set(cache_key, counters, 60 * 60)
    payload = pickle.dumps({'passenger': counters},
                           protocol=pickle.HIGHEST_PROTOCOL)
    sock.send("bdata_mode0_%s %s" % (str(bus_id), payload))
    return {}
# class PassengerProtocol(wamp.ApplicationSession):
# @inlineCallbacks
# def onJoin(self, details):
# bus_id, bp = self.config.extra
# self.publish(
# "ru.bustime.bus_mode0__%s" % bus_id, {'passenger': bp})
# self.disconnect()
def rpc_tcard(tcard_num):
    """Look up (creating on first sight) a transport card by number and
    return its balance and social flag.

    Returns an empty dict when the lookup or update fails -- this endpoint
    is deliberately best-effort so a bad card number never breaks the RPC.
    """
    tcard_num = str(tcard_num)[:20]
    serialized = {}
    try:
        tcards = Tcard.objects.filter(num=tcard_num)
        if not tcards:
            # seed `updated` far in the past so the first update() refreshes
            tcard = Tcard.objects.create(
                num=tcard_num, updated=datetime.datetime(2014, 2, 11))
        else:
            tcard = tcards[0]
        tcard.update()
        serialized["balance"] = tcard.balance
        if tcard.social:
            serialized["social"] = 1
        else:
            serialized["social"] = 0
    except Exception:
        # Bugfix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Still best-effort, but only for real errors.
        pass
    return serialized
def rpc_stop_ids(ids):
    """Decode the JSON-encoded list of stop ids and return their data."""
    stop_ids = ujson.loads(ids)
    return ajax_stop_id_f(stop_ids, raw=True)
#
# end RPC
#
class MyBackendComponent(wamp.ApplicationSession):
    """WAMP session that exposes the bustime RPC endpoints on the router."""

    @inlineCallbacks
    def onJoin(self, details):
        """Register every RPC handler under its ru.bustime.* URI once the
        session has joined the realm."""
        # regs = []
        yield self.register(rpc_bdata, u'ru.bustime.rpc_bdata')
        yield self.register(rpc_passenger, u'ru.bustime.rpc_passenger')
        # mobile support only
        yield self.register(rpc_bootstrap_amounts,
                            u'ru.bustime.rpc_bootstrap_amounts')
        yield self.register(rpc_tcard, u'ru.bustime.rpc_tcard')
        yield self.register(rpc_stop_ids, u'ru.bustime.rpc_stop_ids')
def accept(offers):
    """Per-message-deflate negotiation callback: accept the first
    permessage-deflate offer the client sends, or None to decline."""
    # Bugfix: these names were referenced but never imported anywhere in the
    # module, so calling accept() raised NameError; import them locally.
    from autobahn.websocket.compress import (PerMessageDeflateOffer,
                                             PerMessageDeflateOfferAccept)
    for offer in offers:
        if isinstance(offer, PerMessageDeflateOffer):
            return PerMessageDeflateOfferAccept(offer)
if __name__ == '__main__':
    # log.startLogging(sys.stdout)
    # Build an embedded WAMP router and attach the RPC backend session.
    router_factory = wamp.RouterFactory()
    session_factory = wamp.RouterSessionFactory(router_factory)
    component_config = types.ComponentConfig(realm="realm1")
    component_session = MyBackendComponent(component_config)
    session_factory.add(component_session)
    # self.setProtocolOptions(perMessageCompressionAccept = accept)
    # factory.setProtocolOptions(autoPingInterval = 1, autoPingTimeout = 3, autoPingSize = 20)
    # Serve WAMP over WebSocket on PORT and hand control to the reactor.
    transport_factory = websocket.WampWebSocketServerFactory(session_factory,
                                                             debug=False,
                                                             debug_wamp=False)
    server = serverFromString(reactor, "tcp:%s" % PORT)
    server.listen(transport_factory)
    reactor.run()
| {
"content_hash": "12eca79a7a233eb9b3c34e40be7df488",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 94,
"avg_line_length": 29.90967741935484,
"alnum_prop": 0.6147540983606558,
"repo_name": "norn/bustime",
"id": "1acf644fc80c525c59930d009b6203c56b9dec40",
"size": "4682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zbusd/zwebsocket-pure.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56066"
},
{
"name": "HTML",
"bytes": "158846"
},
{
"name": "JavaScript",
"bytes": "166643"
},
{
"name": "PLpgSQL",
"bytes": "2143"
},
{
"name": "Python",
"bytes": "161770"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
} |
"""
These tests cover the individual code pieces related to structural variant
calling.
"""
import os
from django.conf import settings
from django.test import TestCase
import vcf
from main.models import AlignmentGroup
from main.models import Dataset
from main.models import ExperimentSample
from main.models import ExperimentSampleToAlignment
from main.models import get_dataset_with_type
from main.models import Project
from main.models import User
from main.models import Variant
from main.models import VariantAlternate
from pipeline.variant_calling import find_variants_with_tool
from pipeline.variant_calling import VARIANT_TOOL_PARAMS_MAP
from utils.import_util import add_dataset_to_entity
from utils.import_util import copy_and_add_dataset_source
from utils.import_util import copy_dataset_to_entity_data_dir
from utils.import_util import import_reference_genome_from_local_file
from testing_utils.sv_testing_utils import get_sv_variants
from testing_utils.sv_testing_utils import verify_variant_type
# Fixture paths for the (currently disabled) SV-calling tests below.
# All files live under test_data/sv_testing/small_data relative to the
# Django settings PWD.
TEST_FASTA = os.path.join(settings.PWD, 'test_data', 'sv_testing', 'small_data',
        'ref.fa')
# Paired-end reads for the simulated library.
TEST_FASTQ1 = os.path.join(settings.PWD, 'test_data', 'sv_testing', 'small_data',
        'simLibrary.1.fq')
TEST_FASTQ2 = os.path.join(settings.PWD, 'test_data', 'sv_testing', 'small_data',
        'simLibrary.2.fq')
# Fixed uid so the sample created in tests is deterministic.
TEST_SAMPLE_UID = '38d786f2'
# Pre-computed alignment (BAM) plus its index so tests can skip alignment.
TEST_BAM = os.path.join(settings.PWD, 'test_data', 'sv_testing', 'small_data',
        'final.bam')
TEST_BAM_INDEX = os.path.join(settings.PWD, 'test_data', 'sv_testing', 'small_data',
        'final.bam.bai')
# class TestSVCallers(TestCase):
# def setUp(self):
# user = User.objects.create_user('test_username', password='password',
# email='test@example.com')
# # Grab a project.
# self.project = Project.objects.create(title='test project',
# owner=user.get_profile())
# # Create a ref genome.
# self.reference_genome = import_reference_genome_from_local_file(
# self.project, 'ref_genome', TEST_FASTA, 'fasta')
# def test_end_to_end(self):
#         """Test running full pipeline on small-ish data.
# The data file consists of 20,000 bases. At 5,000 bases there is
# a 400 base deletion. At 10,000 bases there is a 400 base inversion.
# At 15,000 bases there is a 400 base tandem duplication.
# It seems that Pindel cannot find the inversion. Fortunately,
# delly can usually find inversions. Unfortunately, delly only
# works well on large data, so we will not test it here.
# """
# # Create a new alignment group.
# alignment_group = AlignmentGroup.objects.create(
# label='test alignment', reference_genome=self.reference_genome)
# # Create a sample.
# sample_1 = ExperimentSample.objects.create(
# uid=TEST_SAMPLE_UID,
# project=self.project,
# label='sample1')
# ### Add the raw reads
# copy_and_add_dataset_source(sample_1, Dataset.TYPE.FASTQ1,
# Dataset.TYPE.FASTQ1, TEST_FASTQ1)
# copy_and_add_dataset_source(sample_1, Dataset.TYPE.FASTQ2,
# Dataset.TYPE.FASTQ2, TEST_FASTQ2)
# # Create relationship between alignment and sample.
# sample_alignment = ExperimentSampleToAlignment.objects.create(
# alignment_group=alignment_group,
# experiment_sample=sample_1)
# ### Add alignment data. NOTE: Stored in sample model dir.
# # index (no dataset)
# copy_dataset_to_entity_data_dir(sample_1, TEST_BAM_INDEX)
# # bam file (with dataset)
# copy_dest = copy_dataset_to_entity_data_dir(sample_1, TEST_BAM)
# add_dataset_to_entity(sample_alignment, Dataset.TYPE.BWA_ALIGN,
# Dataset.TYPE.BWA_ALIGN, copy_dest)
# # Make sure there are no variants before.
# self.assertEqual(0, len(Variant.objects.filter(
# reference_genome=self.reference_genome)))
# # Test with Pindel and Delly for now.
# for tool in ['pindel', 'delly']:
# find_variants_with_tool(alignment_group,
# VARIANT_TOOL_PARAMS_MAP[tool], project=self.project)
# # Check that the alignment group has a freebayes vcf dataset associated
# # with it.
# vcf_dataset = get_dataset_with_type(alignment_group,
# Dataset.TYPE.VCF_PINDEL)
# self.assertIsNotNone(vcf_dataset)
# # Make sure the .vcf file actually exists.
# self.assertTrue(os.path.exists(vcf_dataset.get_absolute_location()))
# # Make sure the vcf is valid by reading it using pyvcf.
# with open(vcf_dataset.get_absolute_location()) as vcf_fh:
# try:
# reader = vcf.Reader(vcf_fh)
# reader.next()
# except:
# self.fail("Not valid vcf")
# # Grab the resulting variants.
# variants = Variant.objects.filter(reference_genome=self.reference_genome)
# # Confirm that 2 variants found.
# self.assertEqual(2, len(variants))
# variant_map = {}
# for variant in variants:
# variant_alternates = VariantAlternate.objects.filter(variant=variant)
# # There should be only one variant alternate per SV.
# self.assertEqual(len(variant_alternates), 1)
# pos = variant.position
# svtype = variant_alternates[0].data['INFO_SVTYPE']
# svlen = variant_alternates[0].data['INFO_SVLEN']
# variant_map[svtype] = (pos, svlen)
# # Check that there is a deletion around base 5000.
# self.assertTrue('DEL' in variant_map)
# self.assertTrue(abs(variant_map['DEL'][0] - 5000) <= 3)
# self.assertTrue(abs(variant_map['DEL'][1] - 400) <= 3)
# # Check that there is a tandem duplication around base 15000.
# self.assertTrue('DUP:TANDEM' in variant_map)
# self.assertTrue(abs(variant_map['DUP:TANDEM'][0] - 15000) <= 3)
# self.assertTrue(abs(variant_map['DUP:TANDEM'][1] - 400) <= 3)
| {
"content_hash": "9d434b535703dc99ce488ca4941db39f",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 84,
"avg_line_length": 39.76282051282051,
"alnum_prop": 0.6363050137030469,
"repo_name": "woodymit/millstone_accidental_source",
"id": "d7e1fd8cd808bd663df0a0ac659db87570c1ac79",
"size": "6203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genome_designer/pipeline/tests/test_sv_calling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11461"
},
{
"name": "CoffeeScript",
"bytes": "3226"
},
{
"name": "HTML",
"bytes": "76254"
},
{
"name": "JavaScript",
"bytes": "140841"
},
{
"name": "Python",
"bytes": "1009103"
},
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
import atexit
import json
import os
import shutil
import subprocess
import re
import yaml
# Workaround: PyYAML mishandles timezone information when parsing
# timestamp scalars, so keep timestamps as their raw string form.
def timestamp_constructor(_, node):
    '''Return the scalar node's raw value as a plain string.'''
    raw = node.value
    return str(raw)
yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Thin wrapper that shells out to the ``oc``/``oadm`` command line
        tools and returns their results as dictionaries.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OpenshiftCLI

            namespace:  project/namespace passed to every oc invocation
            kubeconfig: path to the kubeconfig used for authentication
            verbose:    when True, echo commands and output to stdout
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False):
        ''' replace the current object with the content

            Fetches the live object, applies each key/value from *content*
            via Yedit, and only calls ``oc replace`` when something changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        # Dump the modified object to a temp file for ``oc replace -f``.
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0])
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        # yed.put returns (changed, data); skip the replace when no change.
        if any([not change[0] for change in changes]):
            return {'returncode': 0, 'updated': False}
        yed.write()
        # Remove the temp file when the interpreter exits.
        atexit.register(Utils.cleanup, [fname])
        return self._replace(fname, force)
    def _replace(self, fname, force=False):
        '''run ``oc replace -f <fname>`` (optionally with --force) '''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create(self, fname):
        '''run ``oc create -f <fname>`` in the configured namespace '''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
    def _delete(self, resource, rname):
        '''run ``oc delete <resource> <name>`` in the configured namespace '''
        return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
    def _get(self, resource, rname=None):
        '''run ``oc get`` for a resource (optionally one named object) and
           normalize the parsed JSON into rval['results'] (always a list).
        '''
        cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
        if rname:
            cmd.append(rname)
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are retuned in an array
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
        '''Base command for oc

           Runs ``oc`` (or ``oadm`` when oadm=True) with *cmd* appended and
           returns a dict with returncode/results/cmd; on failure stderr,
           stdout and err keys are added.
        '''
        #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oadm']
        else:
            cmds = ['/usr/bin/oc']
        cmds.extend(cmd)
        rval = {}
        results = ''
        err = None
        if self.verbose:
            print ' '.join(cmds)
        # Authentication comes solely from KUBECONFIG in the child env.
        # NOTE(review): waiting before reading both pipes can deadlock on
        # very large output -- confirm acceptable for expected sizes.
        proc = subprocess.Popen(cmds,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})
        proc.wait()
        stdout = proc.stdout.read()
        stderr = proc.stderr.read()
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }
        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # Empty stdout (no objects found) is tolerated.
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print stdout
                print stderr
                print
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })
        return rval
class Utils(object):
    ''' utilities for openshiftcli modules '''
    @staticmethod
    def create_file(rname, data, ftype=None):
        ''' create a file in tmp with name and contents

            ftype selects serialization: 'yaml', 'json', or raw write.
            The file is scheduled for deletion at interpreter exit.
        '''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.safe_dump(data, default_flow_style=False))
            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                fds.write(data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path
    @staticmethod
    def create_files_from_contents(data):
        '''Turn an array of dict: filename, content into a files array'''
        files = []
        for sfile in data:
            path = Utils.create_file(sfile['path'], sfile['content'])
            files.append(path)
        return files
    @staticmethod
    def cleanup(files):
        '''Clean up on exit: delete each path (file or directory tree) '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name (matches metadata.name)'''
        rval = None
        for result in results:
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the resource file parsed as yaml or json '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
    # Disabling too-many-branches. This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query. '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        # Iterate the server-side definition; user_def must agree on every
        # non-skipped key for the definitions to be considered equal.
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if not isinstance(user_def[key], list):
                    if debug:
                        print 'user_def[key] is not a list'
                    return False
                # Pairwise comparison; dict elements recurse.
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print 'sending list - list'
                            print type(values[0])
                            print type(values[1])
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print 'list compare returned false'
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print 'value should be identical'
                            print value
                            print user_def[key]
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if not isinstance(user_def[key], dict):
                    if debug:
                        print "dict returned false not instance of dict"
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print api_values
                        print user_values
                        print "keys are not equal in dict"
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print "dict returned false"
                        print result
                    return False
            # Verify each key, value pair is the same
            else:
                if not user_def.has_key(key) or value != user_def[key]:
                    if debug:
                        print "value not equal; user_def does not have key"
                        print value
                        print user_def[key]
                    return False
        return True
class YeditException(Exception):
    '''Error raised by Yedit for file-handling problems.'''
class Yedit(object):
    ''' Class to modify yaml files '''
    # Validates a full key expression: dotted names and [index] accessors.
    re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
    # Captures each path component: group 1 = list index, group 2 = dict key.
    re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
    def __init__(self, filename=None, content=None, content_type='yaml'):
        # content: optional pre-parsed data; when absent and filename is
        # given, the file is loaded immediately.
        self.content = content
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        if self.filename and not self.content:
            self.load(content_type=self.content_type)
    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict
    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value
    @staticmethod
    def remove_entry(data, key):
        ''' remove data at location key

            Returns True on removal, None when the key is invalid or absent.
        '''
        if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
            return None
        key_indexes = re.findall(Yedit.re_key, key)
        # Walk down to the parent container of the final path component.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c

            Missing intermediate dicts are created on the way down; the
            (mutated) root is returned, or None for an invalid key.
        '''
        if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
            return None
        curr_data = data
        key_indexes = re.findall(Yedit.re_key, key)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and data.has_key(dict_key):
                    data = data[dict_key]
                    continue
                # Create intermediate dicts that do not exist yet.
                data[dict_key] = {}
                data = data[dict_key]
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for add
        # expected list entry
        if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
            data[int(key_indexes[-1][0])] = item
        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item
        return curr_data
    @staticmethod
    def get_entry(data, key):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c
        '''
        if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
            return None
        key_indexes = re.findall(Yedit.re_key, key)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        return data
    def write(self):
        ''' write yaml_dict to self.filename as yaml '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        with open(self.filename, 'w') as yfd:
            yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
    def read(self):
        ''' return raw file contents (None when the file is missing) '''
        # check if it exists
        if not self.exists():
            return None
        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()
        return contents
    def exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True
        return False
    def load(self, content_type='yaml'):
        ''' parse the file into yaml_dict (None on parse failure) '''
        contents = self.read()
        if not contents:
            return None
        # check if it is yaml
        try:
            if content_type == 'yaml':
                # NOTE(review): yaml.load without an explicit Loader is
                # unsafe on untrusted input -- confirm files are trusted.
                self.yaml_dict = yaml.load(contents)
            elif content_type == 'json':
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as _:
            # Error loading yaml or json
            return None
        return self.yaml_dict
    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key)
        except KeyError as _:
            entry = None
        return entry
    def delete(self, key):
        ''' remove key from a dict; returns (changed, yaml_dict)'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key)
        except KeyError as _:
            entry = None
        if not entry:
            return (False, self.yaml_dict)
        result = Yedit.remove_entry(self.yaml_dict, key)
        if not result:
            return (False, self.yaml_dict)
        return (True, self.yaml_dict)
    def put(self, key, value):
        ''' put key, value into a dict; returns (changed, yaml_dict)'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key)
        except KeyError as _:
            entry = None
        # No-op when the existing value already matches.
        if entry == value:
            return (False, self.yaml_dict)
        result = Yedit.add_entry(self.yaml_dict, key, value)
        if not result:
            return (False, self.yaml_dict)
        return (True, self.yaml_dict)
    def create(self, key, value):
        ''' initialize yaml_dict to {key: value} unless the file exists '''
        if not self.exists():
            self.yaml_dict = {key: value}
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)
import time
class RouterConfig(object):
    ''' RouterConfig is a DTO for the router.

        Holds the router name, kubeconfig path and the option mapping
        used to build the ``oadm router`` command line.
    '''
    def __init__(self, rname, kubeconfig, router_options):
        self.name = rname
        self.kubeconfig = kubeconfig
        self._router_options = router_options
    @property
    def router_options(self):
        ''' read-only access to the raw option mapping '''
        return self._router_options
    def to_option_list(self):
        ''' render this config's options as --key=value CLI arguments '''
        return RouterConfig.stringify(self.router_options)
    @staticmethod
    def stringify(options):
        ''' convert an option mapping to a list of --key=value strings

            Only entries whose 'include' flag and 'value' are both truthy
            are emitted; underscores in option names become dashes.
        '''
        return ['--%s=%s' % (option.replace('_', '-'), spec['value'])
                for option, spec in options.items()
                if spec['include'] and spec['value']]
class Router(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    def __init__(self,
                 router_config,
                 verbose=False):
        ''' Constructor for OpenshiftOC
            a router consists of 3 or more parts
            - dc/router
            - svc/router
            - endpoint/router
        '''
        # Routers always live in the 'default' namespace.
        super(Router, self).__init__('default', router_config.kubeconfig, verbose)
        self.rconfig = router_config
        self.verbose = verbose
        self.router_parts = [{'kind': 'dc', 'name': self.rconfig.name},
                             {'kind': 'svc', 'name': self.rconfig.name},
                             #{'kind': 'endpoints', 'name': self.rconfig.name},
                            ]
    def get(self, filter_kind=None):
        ''' fetch the live objects for self.router_parts (optionally only
            those matching filter_kind); returns a list of _get results '''
        rparts = self.router_parts
        parts = []
        if filter_kind:
            rparts = [part for part in self.router_parts if filter_kind == part['kind']]
        for part in rparts:
            parts.append(self._get(part['kind'], rname=part['name']))
        return parts
    def exists(self):
        '''return True only when every router part can be fetched '''
        parts = self.get()
        for part in parts:
            if part['returncode'] != 0:
                return False
        return True
    def delete(self):
        '''delete every router part; returns the list of results '''
        parts = []
        for part in self.router_parts:
            parts.append(self._delete(part['kind'], part['name']))
        return parts
    def create(self, dryrun=False, output=False, output_type='json'):
        '''Create a deploymentconfig '''
        # We need to create the pem file
        router_pem = '/tmp/router.pem'
        with open(router_pem, 'w') as rfd:
            # Concatenate cert + key into one pem, as oadm router expects.
            rfd.write(open(self.rconfig.router_options['cert_file']['value']).read())
            rfd.write(open(self.rconfig.router_options['key_file']['value']).read())
        atexit.register(Utils.cleanup, [router_pem])
        self.rconfig.router_options['default_cert']['value'] = router_pem
        options = self.rconfig.to_option_list()
        cmd = ['router']
        cmd.extend(options)
        if dryrun:
            cmd.extend(['--dry-run=True', '-o', 'json'])
        # 'router' is an oadm subcommand, not oc.
        results = self.openshift_cmd(cmd, oadm=True, output=output, output_type=output_type)
        return results
    def update(self):
        '''run update for the router. This performs a delete and then create '''
        parts = self.delete()
        if any([part['returncode'] != 0 for part in parts]):
            return parts
        # Ugly built in sleep here.
        time.sleep(15)
        return self.create()
    def needs_update(self, verbose=False):
        ''' check to see if we need to update

            Compares the live dc against a dry-run-generated one; returns
            True when they differ (or an error dict on fetch failure).
        '''
        dc_inmem = self.get(filter_kind='dc')[0]
        if dc_inmem['returncode'] != 0:
            return dc_inmem
        user_dc = self.create(dryrun=True, output=True, output_type='raw')
        if user_dc['returncode'] != 0:
            return user_dc
        # Since the output from oadm_router is returned as raw
        # we need to parse it. The first line is the stats_password
        user_dc_results = user_dc['results'].split('\n')
        # stats_password = user_dc_results[0]
        # Load the string back into json and get the newly created dc
        user_dc = json.loads('\n'.join(user_dc_results[1:]))['items'][0]
        # Router needs some exceptions.
        # We do not want to check the autogenerated password for stats admin
        if not self.rconfig.router_options['stats_password']['value']:
            for idx, env_var in enumerate(user_dc['spec']['template']['spec']['containers'][0]['env']):
                if env_var['name'] == 'STATS_PASSWORD':
                    # Copy the live password so the comparison ignores it.
                    env_var['value'] = \
                        dc_inmem['results'][0]['spec']['template']['spec']['containers'][0]['env'][idx]['value']
        # dry-run doesn't add the protocol to the ports section. We will manually do that.
        for idx, port in enumerate(user_dc['spec']['template']['spec']['containers'][0]['ports']):
            if not port.has_key('protocol'):
                port['protocol'] = 'TCP'
        # These are different when generating
        skip = ['dnsPolicy',
                'terminationGracePeriodSeconds',
                'restartPolicy', 'timeoutSeconds',
                'livenessProbe', 'readinessProbe',
                'terminationMessagePath',
                'rollingParams',
               ]
        return not Utils.check_def_equal(user_dc, dc_inmem['results'][0], skip_keys=skip, debug=verbose)
def main():
    '''
    ansible oc module for secrets

    Builds a RouterConfig from the module parameters and ensures the
    router is present or absent, honoring Ansible check mode.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', type='str',
                       choices=['present', 'absent']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default='default', type='str'),
            name=dict(default='router', type='str'),
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            credentials=dict(default='/etc/origin/master/openshift-router.kubeconfig', type='str'),
            cert_file=dict(default=None, type='str'),
            key_file=dict(default=None, type='str'),
            image=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
            latest_image=dict(default=False, type='bool'),
            labels=dict(default=None, type='list'),
            ports=dict(default=['80:80', '443:443'], type='list'),
            replicas=dict(default=1, type='int'),
            selector=dict(default=None, type='str'),
            service_account=dict(default='router', type='str'),
            router_type=dict(default='haproxy-router', type='str'),
            host_network=dict(default=True, type='bool'),
            # external host options
            external_host=dict(default=None, type='str'),
            external_host_vserver=dict(default=None, type='str'),
            external_host_insecure=dict(default=False, type='bool'),
            external_host_partition_path=dict(default=None, type='str'),
            external_host_username=dict(default=None, type='str'),
            external_host_password=dict(default=None, type='str'),
            external_host_private_key=dict(default=None, type='str'),
            # Metrics
            expose_metrics=dict(default=False, type='bool'),
            metrics_image=dict(default=None, type='str'),
            # Stats
            stats_user=dict(default=None, type='str'),
            stats_password=dict(default=None, type='str'),
            stats_port=dict(default=1936, type='int'),
        ),
        mutually_exclusive=[["router_type", "images"]],
        supports_check_mode=True,
    )
    # Map each module param into the {'value': ..., 'include': ...} shape
    # RouterConfig.stringify expects; include=False keeps an option out of
    # the generated command line.
    rconfig = RouterConfig(module.params['name'],
                           module.params['kubeconfig'],
                           {'credentials': {'value': module.params['credentials'], 'include': True},
                            'default_cert': {'value': None, 'include': True},
                            'cert_file': {'value': module.params['cert_file'], 'include': False},
                            'key_file': {'value': module.params['key_file'], 'include': False},
                            'image': {'value': module.params['image'], 'include': True},
                            'latest_image': {'value': module.params['latest_image'], 'include': True},
                            'labels': {'value': module.params['labels'], 'include': True},
                            'ports': {'value': ','.join(module.params['ports']), 'include': True},
                            'replicas': {'value': module.params['replicas'], 'include': True},
                            'selector': {'value': module.params['selector'], 'include': True},
                            'service_account': {'value': module.params['service_account'], 'include': True},
                            'router_type': {'value': module.params['router_type'], 'include': False},
                            'host_network': {'value': module.params['host_network'], 'include': True},
                            'external_host': {'value': module.params['external_host'], 'include': True},
                            'external_host_vserver': {'value': module.params['external_host_vserver'],
                                                      'include': True},
                            'external_host_insecure': {'value': module.params['external_host_insecure'],
                                                       'include': True},
                            'external_host_partition_path': {'value': module.params['external_host_partition_path'],
                                                             'include': True},
                            'external_host_username': {'value': module.params['external_host_username'],
                                                       'include': True},
                            'external_host_password': {'value': module.params['external_host_password'],
                                                       'include': True},
                            'external_host_private_key': {'value': module.params['external_host_private_key'],
                                                          'include': True},
                            'expose_metrics': {'value': module.params['expose_metrics'], 'include': True},
                            'metrics_image': {'value': module.params['metrics_image'], 'include': True},
                            'stats_user': {'value': module.params['stats_user'], 'include': True},
                            'stats_password': {'value': module.params['stats_password'], 'include': True},
                            'stats_port': {'value': module.params['stats_port'], 'include': True},
                           })
    ocrouter = Router(rconfig)
    state = module.params['state']
    ########
    # Delete
    ########
    if state == 'absent':
        if not ocrouter.exists():
            module.exit_json(changed=False, state="absent")
        if module.check_mode:
            module.exit_json(change=False, msg='Would have performed a delete.')
        api_rval = ocrouter.delete()
        module.exit_json(changed=True, results=api_rval, state="absent")
    if state == 'present':
        ########
        # Create
        ########
        if not ocrouter.exists():
            if module.check_mode:
                module.exit_json(change=False, msg='Would have performed a create.')
            api_rval = ocrouter.create()
            module.exit_json(changed=True, results=api_rval, state="present")
        ########
        # Update
        ########
        if not ocrouter.needs_update():
            module.exit_json(changed=False, state="present")
        if module.check_mode:
            module.exit_json(change=False, msg='Would have performed an update.')
        api_rval = ocrouter.update()
        if api_rval['returncode'] != 0:
            module.fail_json(msg=api_rval)
        module.exit_json(changed=True, results=api_rval, state="present")
    # Reached only when state is neither 'present' nor 'absent'.
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# Import module snippets. The wildcard import is required by the Ansible
# module harness (it injects AnsibleModule and helpers at runtime).
from ansible.module_utils.basic import *
main()
| {
"content_hash": "6941a0028206ce0d679d352892c0dee7",
"timestamp": "",
"source": "github",
"line_count": 799,
"max_line_length": 116,
"avg_line_length": 35.66207759699625,
"alnum_prop": 0.5187407875342177,
"repo_name": "twiest/openshift-tools",
"id": "c6b45c14e067371d48b602fd6f434a76fede8ff0",
"size": "28872",
"binary": false,
"copies": "9",
"ref": "refs/heads/stg",
"path": "openshift/installer/vendored/openshift-ansible-git-2016-04-18/roles/lib_openshift_api/library/oadm_router.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Go",
"bytes": "382164"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "102550"
},
{
"name": "JavaScript",
"bytes": "1580"
},
{
"name": "Makefile",
"bytes": "3324"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "27786029"
},
{
"name": "Shell",
"bytes": "1378677"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
} |
from django.test import TransactionTestCase
from reversion.models import Version
from waldur_core.core.utils import silent_call
from . import models as test_models
class TestQuotaField(TransactionTestCase):
    '''Behaviour of the declarative QuotaField.'''

    def test_quota_is_automatically_created_with_scope(self):
        owner = test_models.GrandparentModel.objects.create()
        quota_name = test_models.GrandparentModel.Quotas.regular_quota
        self.assertTrue(owner.quotas.filter(name=quota_name).exists())

    def test_quota_limit_field_create(self):
        instance = test_models.GrandparentModel.objects.create(regular_quota=7)
        self.assertEqual(instance.quotas.get(name='regular_quota').limit, 7)

    def test_quota_limit_field_update(self):
        instance = test_models.GrandparentModel.objects.create()
        instance.regular_quota = 9
        instance.save()
        self.assertEqual(instance.quotas.get(name='regular_quota').limit, 9)

    # XXX: Ideally this method should belong to ReversionMixin tests and
    # should be separated into several smaller tests.
    def test_quota_versions(self):
        owner = test_models.GrandparentModel.objects.create()
        quota = owner.quotas.get(name=test_models.GrandparentModel.Quotas.regular_quota)
        quota.usage = 13.0
        quota.save()
        # A usage change must produce a fresh version.
        versions = Version.objects.get_for_object(quota)
        latest_version = versions.latest('revision__date_created')
        self.assertEqual(latest_version._object_version.object.usage, quota.usage)
        # Saving an unchanged value must not create another version.
        quota.usage = 13
        quota.save()
        newest_version = Version.objects.get_for_object(quota).latest('revision__date_created')
        self.assertEqual(newest_version, latest_version)
class TestCounterQuotaField(TransactionTestCase):
    '''CounterQuotaField: usage tracks the number of child objects.'''

    def setUp(self):
        self.grandparent = test_models.GrandparentModel.objects.create()
        self.parent = test_models.ParentModel.objects.create(parent=self.grandparent)
        self.child = test_models.ChildModel.objects.create(parent=self.parent)
        self.quota_field = test_models.ParentModel.Quotas.counter_quota

    def _fetch_quota(self, name):
        '''Reload the parent's quota with the given name from the DB.'''
        return self.parent.quotas.get(name=name)

    def test_counter_quota_usage_is_increased_on_child_creation(self):
        self.assertEqual(self._fetch_quota(self.quota_field).usage, 1)

    def test_counter_quota_usage_is_decreased_on_child_deletion(self):
        self.child.delete()
        self.assertEqual(self._fetch_quota(self.quota_field).usage, 0)

    def test_counter_quota_usage_is_right_after_recalculation(self):
        stale = self._fetch_quota(self.quota_field)
        stale.usage = 3
        stale.save()
        silent_call('recalculatequotas')
        self.assertEqual(self._fetch_quota(self.quota_field).usage, 1)

    def test_counter_quota_usage_is_working_with_two_models_as_targets(self):
        self.parent.second_children.create()
        combined = self._fetch_quota(test_models.ParentModel.Quotas.two_targets_counter_quota)
        self.assertEqual(combined.usage, 2)

    def test_delta_quota_usage_is_increased_on_child_creation(self):
        delta = self._fetch_quota(test_models.ParentModel.Quotas.delta_quota)
        self.assertEqual(delta.usage, 10)

    def test_delta_quota_usage_is_decreased_on_child_deletion(self):
        self.child.delete()
        delta = self._fetch_quota(test_models.ParentModel.Quotas.delta_quota)
        self.assertEqual(delta.usage, 0)
class TestTotalQuotaField(TransactionTestCase):
    '''TotalQuotaField: usage aggregates the children's ``size`` values.'''

    def setUp(self):
        self.grandparent = test_models.GrandparentModel.objects.create()
        self.parent = test_models.ParentModel.objects.create(parent=self.grandparent)
        self.child = test_models.SecondChildModel.objects.create(parent=self.parent, size=100)
        self.quota_field = test_models.ParentModel.Quotas.total_quota

    def test_counter_quota_usage_is_increased_on_child_creation(self):
        usage = self.parent.quotas.get(name=self.quota_field).usage
        self.assertEqual(usage, 100)

    def test_counter_quota_usage_is_decreased_on_child_deletion(self):
        self.child.delete()
        usage = self.parent.quotas.get(name=self.quota_field).usage
        self.assertEqual(usage, 0)

    def test_counter_quota_usage_is_right_after_recalculation(self):
        stale = self.parent.quotas.get(name=self.quota_field)
        stale.usage = 0
        stale.save()
        silent_call('recalculatequotas')
        recalculated = self.parent.quotas.get(name=self.quota_field)
        self.assertEqual(recalculated.usage, 100)
class TestUsageAggregatorField(TransactionTestCase):
    """Tests that children quota usages are aggregated into parent and
    grandparent quotas."""

    def setUp(self):
        self.grandparent = test_models.GrandparentModel.objects.create()
        self.parents = [test_models.ParentModel.objects.create(parent=self.grandparent)
                        for _ in range(2)]
        non_quota_parent = test_models.NonQuotaParentModel.objects.create()
        self.children = [
            test_models.ChildModel.objects.create(parent=parent, non_quota_parent=non_quota_parent)
            for parent in self.parents]
        self.child_quota_field = test_models.ChildModel.Quotas.usage_aggregator_quota
        self.parent_quota_field = test_models.ParentModel.Quotas.usage_aggregator_quota
        self.grandparent_quota_field = test_models.GrandparentModel.Quotas.usage_aggregator_quota

    def _set_children_usage(self, usage_value):
        # Helper shared by several tests: set the aggregated quota usage on
        # every child.
        for child in self.children:
            quota = child.quotas.get(name=self.child_quota_field)
            quota.usage = usage_value
            quota.save()

    def test_aggregator_usage_increases_on_child_quota_usage_increase(self):
        usage_value = 10
        self._set_children_usage(usage_value)
        for parent in self.parents:
            quota = parent.quotas.get(name=self.parent_quota_field)
            self.assertEqual(quota.usage, usage_value)
        quota = self.grandparent.quotas.get(name=self.grandparent_quota_field)
        self.assertEqual(quota.usage, usage_value * len(self.children))

    def test_aggregator_usage_decreases_on_child_deletion(self):
        usage_value = 10
        self._set_children_usage(usage_value)
        first_child = self.children[0]
        first_child.delete()
        quota = first_child.parent.quotas.get(name=self.parent_quota_field)
        self.assertEqual(quota.usage, 0)
        # The second child's usage still counts at the grandparent level.
        quota = self.grandparent.quotas.get(name=self.grandparent_quota_field)
        self.assertEqual(quota.usage, usage_value)

    def test_aggregator_usage_increases_on_child_creation(self):
        usage_value = 10
        quota_field = test_models.ChildModel.Quotas.usage_aggregator_quota
        original_default = quota_field.default_usage
        quota_field.default_usage = usage_value
        # BUG FIX (test isolation): the class-level default was mutated and
        # never restored, leaking into unrelated tests.
        self.addCleanup(setattr, quota_field, 'default_usage', original_default)
        parent = self.parents[0]
        # NOTE(review): unlike setUp, no non_quota_parent is passed here —
        # presumably that FK is nullable; confirm against test_models.
        test_models.ChildModel.objects.create(parent=parent)
        quota = parent.quotas.get(name=self.parent_quota_field)
        self.assertEqual(quota.usage, usage_value)
        # BUG FIX: the grandparent quota was previously looked up with
        # parent_quota_field; use grandparent_quota_field, consistent with
        # TestLimitAggregatorField.
        quota = self.grandparent.quotas.get(name=self.grandparent_quota_field)
        self.assertEqual(quota.usage, usage_value)

    def test_usage_aggregator_recalculation(self):
        usage_value = 10
        self._set_children_usage(usage_value)
        # Corrupt the aggregated values on purpose to verify recalculation.
        for parent in self.parents:
            parent.set_quota_usage(self.parent_quota_field, 666)
        self.grandparent.set_quota_usage(self.grandparent_quota_field, 1232)
        silent_call('recalculatequotas')
        for parent in self.parents:
            quota = parent.quotas.get(name=self.parent_quota_field)
            self.assertEqual(quota.usage, usage_value)
        quota = self.grandparent.quotas.get(name=self.grandparent_quota_field)
        self.assertEqual(quota.usage, usage_value * len(self.children))

    def test_usage_aggregator_quota_works_with_specified_child_quota_name(self):
        usage_value = 10
        self._set_children_usage(usage_value)
        # second_usage_aggregator_quota should increase as well.
        for parent in self.parents:
            quota = parent.quotas.get(name=test_models.ParentModel.Quotas.second_usage_aggregator_quota)
            self.assertEqual(quota.usage, usage_value)
class TestLimitAggregatorField(TransactionTestCase):
    """Tests that children quota limits are aggregated as parents' usage."""

    def setUp(self):
        self.grandparent = test_models.GrandparentModel.objects.create()
        self.parents = [test_models.ParentModel.objects.create(parent=self.grandparent)
                        for _ in range(2)]
        non_quota_parent = test_models.NonQuotaParentModel.objects.create()
        self.children = [
            test_models.ChildModel.objects.create(parent=parent, non_quota_parent=non_quota_parent)
            for parent in self.parents]
        self.child_quota_field = test_models.ChildModel.Quotas.limit_aggregator_quota
        self.parent_quota_field = test_models.ParentModel.Quotas.limit_aggregator_quota
        self.grandparent_quota_field = test_models.GrandparentModel.Quotas.limit_aggregator_quota

    def _set_children_limit(self, limit_value):
        # Helper shared by several tests: set the aggregated quota limit on
        # every child.
        for child in self.children:
            quota = child.quotas.get(name=self.child_quota_field)
            quota.limit = limit_value
            quota.save()

    def test_aggregator_usage_increases_on_child_quota_limit_increase(self):
        limit_value = 10
        self._set_children_limit(limit_value)
        for parent in self.parents:
            quota = parent.quotas.get(name=self.parent_quota_field)
            self.assertEqual(quota.usage, limit_value)
        quota = self.grandparent.quotas.get(name=self.grandparent_quota_field)
        self.assertEqual(quota.usage, limit_value * len(self.children))

    def test_aggregator_usage_decreases_on_child_deletion(self):
        limit_value = 10
        self._set_children_limit(limit_value)
        first_child = self.children[0]
        first_child.delete()
        quota = first_child.parent.quotas.get(name=self.parent_quota_field)
        self.assertEqual(quota.usage, 0)
        # The second child's limit still counts at the grandparent level.
        quota = self.grandparent.quotas.get(name=self.grandparent_quota_field)
        self.assertEqual(quota.usage, limit_value)

    def test_aggregator_usage_increases_on_child_creation(self):
        limit_value = 10
        quota_field = test_models.ChildModel.Quotas.limit_aggregator_quota
        original_default = quota_field.default_limit
        quota_field.default_limit = limit_value
        # BUG FIX (test isolation): the class-level default was mutated and
        # never restored, leaking into unrelated tests.
        self.addCleanup(setattr, quota_field, 'default_limit', original_default)
        parent = self.parents[0]
        # NOTE(review): unlike setUp, no non_quota_parent is passed here —
        # presumably that FK is nullable; confirm against test_models.
        test_models.ChildModel.objects.create(parent=parent)
        quota = parent.quotas.get(name=self.parent_quota_field)
        self.assertEqual(quota.usage, limit_value)
        quota = self.grandparent.quotas.get(name=self.grandparent_quota_field)
        self.assertEqual(quota.usage, limit_value)

    def test_limit_aggregator_recalculation(self):
        limit_value = 10
        self._set_children_limit(limit_value)
        # Corrupt the aggregated values on purpose to verify recalculation.
        for parent in self.parents:
            parent.set_quota_limit(self.parent_quota_field, 666)
        self.grandparent.set_quota_limit(self.grandparent_quota_field, 1232)
        silent_call('recalculatequotas')
        for parent in self.parents:
            quota = parent.quotas.get(name=self.parent_quota_field)
            self.assertEqual(quota.usage, limit_value)
        quota = self.grandparent.quotas.get(name=self.grandparent_quota_field)
        self.assertEqual(quota.usage, limit_value * len(self.children))
| {
"content_hash": "ab0eac03503c60e38bcb029c34867c69",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 112,
"avg_line_length": 43.26470588235294,
"alnum_prop": 0.6860129163834127,
"repo_name": "opennode/nodeconductor",
"id": "9f1933f22797da2134dda194ce908f35a7f461cd",
"size": "11768",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "waldur_core/quotas/tests/test_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1877"
},
{
"name": "HTML",
"bytes": "17528"
},
{
"name": "JavaScript",
"bytes": "248900"
},
{
"name": "Python",
"bytes": "1254720"
}
],
"symlink_target": ""
} |
import re
class File:
    """Minimal file-system helper."""

    @staticmethod
    def read(filename):
        """Return the raw bytes of *filename*."""
        with open(filename, 'rb') as handle:
            return handle.read()
class Textops:
    """Text-processing helpers."""

    @staticmethod
    def sentences(text):
        """Return the sentences of *text*, each kept with its terminating
        '.', '!' or '?'; trailing text without a terminator is dropped."""
        terminated = r'[^.!\?]+[\.!\?]'
        return re.findall(terminated, text)
| {
"content_hash": "0962d09946c938eef694f38c046b3f12",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 49,
"avg_line_length": 16.5,
"alnum_prop": 0.6439393939393939,
"repo_name": "dluman/inker",
"id": "eb4de3a9a1bb849e22adc78be90e2b5d5a3cdb7a",
"size": "264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corpus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10890"
}
],
"symlink_target": ""
} |
# Production configuration values.
DEBUG = False  # never enable debug output in production
SERVER_PORT = 8895  # TCP port the server listens on
GUNICORN_WORKERS = 4  # number of gunicorn worker processes
"content_hash": "e59dbd11a63f19bf650ac4c3240ea3da",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 20,
"avg_line_length": 11,
"alnum_prop": 0.7272727272727273,
"repo_name": "dropbox-dashbpard/error-detect-of-log",
"id": "8524c8fa83de902558f84e6ca16ad952950761bb",
"size": "80",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/prod.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27885"
},
{
"name": "Shell",
"bytes": "73"
}
],
"symlink_target": ""
} |
"""
django-fhir
FILE: __init__.py
Created: 1/6/16 5:07 PM
"""
__author__ = 'Mark Scrimshire:@ekivemark'
# Hello World is here to test the loading of the module from fhir.settings
# from .settings import *
#from fhir_io_hapi.views.get import hello_world
#from fhir_io_hapi.views.delete import delete
#from fhir_io_hapi.views.get import (read, vread, history)
#from fhir_io_hapi.views.search import find
# Used to load post_save signal for write to backend fhir server
default_app_config = 'fhir_io_hapi.apps.fhir_io_hapi_config'
| {
"content_hash": "3b29f88386b17c762c5ef8182b6a0d7a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 23.347826086956523,
"alnum_prop": 0.7318435754189944,
"repo_name": "ekivemark/BlueButtonFHIR_API",
"id": "d439b10ad2712c0ad9053027d3f3597ce116ba2b",
"size": "613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fhir_io_hapi/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "251473"
},
{
"name": "Dockerfile",
"bytes": "326"
},
{
"name": "HTML",
"bytes": "474740"
},
{
"name": "JavaScript",
"bytes": "3335"
},
{
"name": "Python",
"bytes": "419723"
},
{
"name": "Shell",
"bytes": "940"
}
],
"symlink_target": ""
} |
class InvalidExpression(Exception):
    """Raised when an expression is invalid."""
    pass
class WriterNotSupportingGroupedBatch(Exception):
    """Raised when a writer does not support grouped batches."""
    pass
class ConfigurationError(ValueError):
    "Configuration provided isn't valid."


class InvalidDateRangeError(ValueError):
    "Empty or impossible date range"


class UnsupportedCompressionFormat(ValueError):
    "Unsupported compression format."


class ConfigCheckError(ConfigurationError):
    """Configuration error carrying a per-section mapping of validation errors.

    ``errors`` maps a section name either to a single error string or to a
    dict of ``{field: error}`` pairs; __str__ renders both forms.
    """

    def __init__(self, message="Configuration provided isn't valid.", errors=None):
        super(ConfigCheckError, self).__init__(message)
        # Store the message explicitly: __str__ relies on it and the implicit
        # Exception.message attribute does not exist on Python 3.
        self.message = message
        # BUG FIX: the previous mutable default argument (errors={}) was
        # shared across every instance created without an explicit mapping.
        self.errors = errors if errors is not None else {}

    def __str__(self):
        if not self.errors:
            return self.message
        error_messages = []
        # Works on both Python 2 and 3 (the original used dict.iteritems()
        # and basestring, neither of which exists on Python 3).
        for section, errors in self.errors.items():
            if isinstance(errors, dict):
                section_errors = '\n'.join(
                    ' {}: {}'.format(field, error) for field, error in errors.items())
                error_messages.append('{}:\n{}'.format(section, section_errors))
            else:
                error_messages.append('{}: {}'.format(section, errors))
        return '{}\n{}'.format(self.message, '\n'.join(error_messages))
| {
"content_hash": "cbbc1c9ef99e6c02be69f565bfccd788",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 91,
"avg_line_length": 31.43243243243243,
"alnum_prop": 0.6354256233877902,
"repo_name": "scrapinghub/exporters",
"id": "1af91deef07783ab07493eb2ab94275072e1d8cf",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exporters/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "501414"
}
],
"symlink_target": ""
} |
import datetime
from plugin.svtplay.utils import logger
# TODO: Combine with episode (?)
class SvtVideo(object):
    """Value object holding the metadata of a single SVT Play video.

    Stores the constructor arguments verbatim and logs them at debug level.
    """

    def __init__(self, url, title, description, duration, published_at, genres,
                 image_url):
        self.url = url
        self.title = title
        self.description = description
        self.duration = duration
        self.published_at = published_at
        self.genres = genres
        self.image_url = image_url
        # Build a multi-line dump of every field for debug logging.
        string = "Creating SvtVideo with\n" + \
            " url: " + str(self.url) + "\n" + \
            " title: " + str(self.title) + "\n" + \
            " description: " + str(self.description) + "\n" + \
            " duration: " + str(self.duration) + "\n" + \
            " published_at: " + str(self.published_at) + "\n" + \
            " genres: " + str(self.genres) + "\n" + \
            " image_url: " + str(self.image_url)
        logger().debug(string)

    def __repr__(self):
        # Only the title identifies the video in reprs.
        return "SvtVideo(\"%s\")" % (self.title, )
| {
"content_hash": "4e305620060a30f5837b35f499b8000a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 33.25806451612903,
"alnum_prop": 0.5063045586808923,
"repo_name": "dstenb/pylaunchr-svtplay",
"id": "ee0037c9e8835738a493ef363607e20c42340980",
"size": "1031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "items/video.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50371"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
import re
import functools
from turbo.conf import app_config
try:
from jinja2 import Environment, FileSystemLoader
except ImportError:
raise ImportError('jinja2 module ImportError')
class Jinja2Environment(Environment):
    """find template location
    according to current parent and template relative path to find template path
    args:
        template current template that needs to locate
        parent which call template with extends or include directive
    return:
        real template path
    example:
        input:
            template ../../base.html
            parent app/app/index.html
        output:
            base.html
        input:
            template header.html
            parent app/app/index.html
        output:
            app/app/header.html
        input:
            template ../header.html
            parent app/app/index.html
        output:
            app/header.html
    """
    # NOTE(review): the dots are not escaped, so './' here matches any
    # character followed by '/'; findall is only used to COUNT relative
    # segments, which is presumably why this still works — confirm.
    relative_path = re.compile('(./|../)', re.IGNORECASE)
    # One directory component including its trailing slash, e.g. 'app/'.
    relative_dir = re.compile('([^/\s]{1,}/)', re.IGNORECASE)  # noqa
    # The final path component (the bare file name).
    real_name = re.compile('([^/\s]$)')  # noqa

    def join_path(self, template, parent):
        # Number of relative ('./' or '../') segments in the template path.
        t_group = self.relative_path.findall(template)
        # Directory components of the parent template's path.
        p_group = self.relative_dir.findall(parent)
        t_group_length = len(t_group)
        template_name = template
        #
        real_template_path = p_group
        if t_group_length:
            # Strip the relative prefix: keep only the bare file name and
            # drop one parent directory per relative segment.
            template_name = self.real_name.match(
                template, template.rfind('/') + 1).group()
            real_template_path = p_group[0:0 - t_group_length]
        real_template_path.append(template_name)
        return ''.join(real_template_path)
def turbo_jinja2(func):
    """Decorator swapping a handler's render method for Jinja2 rendering.

    Builds one shared Jinja2 environment from the application's
    template_path setting (auto-reload follows the debug flag) and renders
    templates with the handler, request, XSRF form HTML and context exposed.
    """
    _jinja2_env = Jinja2Environment(loader=FileSystemLoader(app_config.web_application_setting[
        'template_path']), auto_reload=app_config.web_application_setting['debug'])

    @functools.wraps(func)
    def wrapper(self, template_name, **kwargs):
        # The template is resolved relative to the handler's template_path.
        template = _jinja2_env.get_template(
            ('%s%s') % (self.template_path, template_name))
        return template.render(handler=self, request=self.request, xsrf_form_html=self.xsrf_form_html(),
                               context=self.get_context(), **kwargs)
    return wrapper
| {
"content_hash": "7c4d000ffdd7e10289c25ce0d499e69f",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 111,
"avg_line_length": 29.72151898734177,
"alnum_prop": 0.6068994889267462,
"repo_name": "wecatch/app-turbo",
"id": "fa34480e228b9407fc2f7f4eb79024dc0c0a0b9e",
"size": "2371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbo/template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2127"
},
{
"name": "Python",
"bytes": "113614"
}
],
"symlink_target": ""
} |
__author__ = 'giacomov'
from astromodels.core.tree import Node
from astromodels.core.parameter import Parameter
class Polarization(Node):
    """Base node describing the polarization of a source.

    ``polarization_type`` selects the parameterization: 'linear'
    (degree/angle) or 'stokes' (I, Q, U, V).
    """

    def __init__(self, polarization_type='linear'):
        assert polarization_type in ['linear', 'stokes'], 'polarization must be linear or stokes'
        self._polarization_type = polarization_type
        Node.__init__(self, 'polarization')

    @staticmethod
    def _get_parameter_from_input(number_or_parameter, minimum, maximum, what, desc, unit):
        """Coerce a number or a Parameter into a Parameter bounded by
        [minimum, maximum]; an existing Parameter must already carry exactly
        those bounds."""
        # Try to transform it to float, if it works then we transform it to a parameter
        try:
            number_or_parameter = float(number_or_parameter)
        except TypeError:
            # NOTE(review): only TypeError is caught; a non-numeric string
            # would raise ValueError from float() and propagate — confirm
            # that is intended.
            assert isinstance(number_or_parameter, Parameter), "%s must be either a number or a " \
                                                               "parameter instance" % what
            # So this is a Parameter instance already. Enforce that it has the right maximum and minimum
            parameter = number_or_parameter
            assert parameter.min_value == minimum, "%s must have a minimum of %s" % (what, minimum)
            assert parameter.max_value == maximum, "%s must have a maximum of %s" % (what, maximum)
        else:
            # This was a float. Enforce that it has a legal value
            assert minimum <= number_or_parameter <= maximum, "%s cannot have a value of %s, " \
                                                              "it must be %s <= %s <= %s" % (what, number_or_parameter,
                                                                                             minimum, what, maximum)
            parameter = Parameter(what, number_or_parameter,
                                  desc=desc, min_value=minimum, max_value=maximum, unit=unit, free=True)
        return parameter
# TODO: add transform between polarizations
class LinearPolarization(Polarization):

    def __init__(self, degree, angle):
        """
        Linear parameterization of polarization.

        :param degree: polarization degree in percent (0-100); a number or a
            Parameter instance with exactly those bounds
        :param angle: polarization angle in degrees (0-180); a number or a
            Parameter instance with exactly those bounds
        """
        super(LinearPolarization, self).__init__(polarization_type='linear')
        degree = self._get_parameter_from_input(degree, 0, 100, 'degree', 'Polarization degree', 'dimensionless_unscaled')
        angle = self._get_parameter_from_input(angle, 0, 180, 'angle', 'Polarization angle', 'deg')
        # Register both parameters as children of this node.
        self._add_child(degree)
        self._add_child(angle)
class StokesPolarization(Polarization):

    def __init__(self, I, Q, U, V):
        """
        Stokes parameterization of polarization.

        :param I: Stokes I parameter (0-1); a number or a Parameter instance
        :param Q: Stokes Q parameter (0-1); a number or a Parameter instance
        :param U: Stokes U parameter (0-1); a number or a Parameter instance
        :param V: Stokes V parameter (0-1); a number or a Parameter instance
        """
        super(StokesPolarization, self).__init__(polarization_type='stokes')
        # get the parameters set up
        # BUG FIX: _get_parameter_from_input requires a ``unit`` argument;
        # the previous calls omitted it and raised TypeError. Stokes
        # parameters are dimensionless, consistent with the unit used for
        # LinearPolarization's degree.
        I = self._get_parameter_from_input(I, 0, 1, 'I', 'Stokes I', 'dimensionless_unscaled')
        Q = self._get_parameter_from_input(Q, 0, 1, 'Q', 'Stokes Q', 'dimensionless_unscaled')
        U = self._get_parameter_from_input(U, 0, 1, 'U', 'Stokes U', 'dimensionless_unscaled')
        V = self._get_parameter_from_input(V, 0, 1, 'V', 'Stokes V', 'dimensionless_unscaled')
        # add the children
        self._add_child(I)
        self._add_child(Q)
        self._add_child(U)
        self._add_child(V)
# def to_linear_polarization(self):
# # polarization angle
# # psi = 0.5 * np.arctan2(U_bin, Q_bin)
#
# # polarization fraction
# # frac = np.sqrt(Q_bin ** 2 + U_bin ** 2) / I_bin
#
# pass
#
# #angle = 0.5 * np.arctan2(se)
#
#
| {
"content_hash": "7cdaa49bf253f27c72870e66e5367210",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 122,
"avg_line_length": 31.04385964912281,
"alnum_prop": 0.5637185645662617,
"repo_name": "grburgess/astromodels",
"id": "9c0f5b469d2e4d7edb4ddd475a591af6c7e05945",
"size": "3539",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astromodels/core/polarization.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "131761"
},
{
"name": "Dockerfile",
"bytes": "840"
},
{
"name": "Python",
"bytes": "491102"
},
{
"name": "Shell",
"bytes": "7049"
}
],
"symlink_target": ""
} |
"""Tests that all parsers and plugins are imported correctly."""
from __future__ import unicode_literals
import glob
import os
import unittest
from tests import test_lib
class ParserImportTest(test_lib.ImportCheckTestCase):
    """Tests that parser classes are imported correctly."""

    _IGNORABLE_FILES = frozenset([
        'dtfabric_parser.py', 'dtfabric_plugin.py', 'logger.py', 'manager.py',
        'presets.py', 'mediator.py', 'interface.py', 'plugins.py'])

    def testParsersImported(self):
        """Tests that all parsers are imported."""
        self._AssertFilesImportedInInit(
            test_lib.PARSERS_PATH, self._IGNORABLE_FILES)

    def testPluginsImported(self):
        """Tests that all plugins are imported."""
        parsers_glob = '{0:s}/*_plugins/'.format(test_lib.PARSERS_PATH)
        # BUG FIX: glob() results already start with PARSERS_PATH; the
        # previous os.path.join(PARSERS_PATH, result) duplicated the prefix
        # whenever PARSERS_PATH is a relative path (with an absolute path the
        # join silently discarded its first argument).
        for plugin_directory_path in glob.glob(parsers_glob):
            self._AssertFilesImportedInInit(
                plugin_directory_path, self._IGNORABLE_FILES)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "ea8e7e74fd256a2a0be5591ba8cbcd1d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 31.083333333333332,
"alnum_prop": 0.6952636282394995,
"repo_name": "rgayon/plaso",
"id": "39c232aee27622db126b0be259d637218da402af",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parsers/init_imports.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "415"
},
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "712"
},
{
"name": "PowerShell",
"bytes": "17771"
},
{
"name": "Python",
"bytes": "4803191"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "46225"
}
],
"symlink_target": ""
} |
'''GRIT tool that runs the unit test suite for GRIT.'''
from __future__ import print_function
import getopt
import sys
import unittest
try:
import grit.test_suite_all
except ImportError:
pass
from grit.tool import interface
class UnitTestTool(interface.Tool):
    '''Runs the complete GRIT unit-test suite (e.g. 'grit unit') inside the
    environment prepared by the basic GRIT runner.'''

    def ShortDescription(self):
        return 'Use this tool to run all the unit tests for GRIT.'

    def ParseOptions(self, args):
        """Consume known options; return the remaining positional arguments."""
        own_opts, remaining = getopt.getopt(args, '', ('help',))
        if any(name == '--help' for name, _ in own_opts):
            self.ShowUsage()
            sys.exit(0)
        return remaining

    def Run(self, opts, args):
        """Entry point: refuse positional arguments, then run the suite."""
        leftover = self.ParseOptions(args)
        if leftover:
            print('This tool takes no arguments.')
            return 2
        runner = unittest.TextTestRunner(verbosity=2)
        return runner.run(grit.test_suite_all.TestSuiteAll())
| {
"content_hash": "dc9787af766cacdea323166c505603db",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 26.23076923076923,
"alnum_prop": 0.6774193548387096,
"repo_name": "endlessm/chromium-browser",
"id": "7e96b699c3c4566eca2815c839e96391396e2a14",
"size": "1190",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/grit/grit/tool/unit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes the ``name`` fields of Group and
    Skill unique and indexed."""

    dependencies = [
        ('groups', '0016_auto_20170322_0342'),
    ]

    operations = [
        migrations.AlterField(
            model_name='group',
            name='name',
            field=models.CharField(unique=True, max_length=100, verbose_name='Name', db_index=True),
        ),
        migrations.AlterField(
            model_name='skill',
            name='name',
            field=models.CharField(unique=True, max_length=100, verbose_name='Name', db_index=True),
        ),
    ]
| {
"content_hash": "058c46c5b28c2674ff3c0a0f67187055",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 100,
"avg_line_length": 27.130434782608695,
"alnum_prop": 0.5865384615384616,
"repo_name": "fxa90id/mozillians",
"id": "1402de832d3688989f54837ae62741f4ff6888f1",
"size": "648",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mozillians/groups/migrations/0017_auto_20170322_0710.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1986"
},
{
"name": "CSS",
"bytes": "181742"
},
{
"name": "HTML",
"bytes": "165063"
},
{
"name": "JavaScript",
"bytes": "141584"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "887164"
},
{
"name": "Shell",
"bytes": "1332"
}
],
"symlink_target": ""
} |
from unittest import skip
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.Input.WaterBudget import SatStorCarryOver
class TestSatStorCarryOver(VariableUnitTest):
    """Checks SatStorCarryOver_2 against the reference SatStorCarryOver
    implementation."""

    @skip("not ready")
    def test_SatStorCarryOver(self):
        # NOTE(review): z is fetched but unused — presumably the
        # implementations will take it as an argument once ready.
        z = self.z
        # Optimized and reference results must agree to 7 decimal places.
        np.testing.assert_array_almost_equal(
            SatStorCarryOver.SatStorCarryOver_2(),
            SatStorCarryOver.SatStorCarryOver(), decimal=7)
| {
"content_hash": "7bffc2a88aee38993f22979477dcb23e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 59,
"avg_line_length": 28.733333333333334,
"alnum_prop": 0.7331786542923434,
"repo_name": "WikiWatershed/gwlf-e",
"id": "bf97f74f07dc0c905e91d1599bcc5306eb7f3883",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/unittests/test_SatStorCarryOver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAMS",
"bytes": "5930291"
},
{
"name": "Python",
"bytes": "775719"
}
],
"symlink_target": ""
} |
import os
import sys
import glob
import pkgutil
import tarfile
import zipfile
import warnings
import collections
if sys.version_info >= (3,):
import io as StringIO
import configparser as ConfigParser
else:
import StringIO
import ConfigParser
from email.parser import FeedParser
from .utils import ext, tar_files, zip_files
class MetadataFileParser(object):
    """Parses a single metadata file (PKG-INFO, requires.txt, ...) into a
    Python structure.

    :param data: the raw text content of the metadata file
    :param name: the file name, used to pick the parsing strategy
    :raises TypeError: if ``name`` is empty
    """

    def __init__(self, data, name):
        # Dispatch table: metadata file name -> parsing method.
        self.MAP = {
            'PKG-INFO': self.pkg_info,
            'SOURCES.txt': self.list,
            'top_level.txt': self.list,
            'requires.txt': self.req,
            'dependency_links.txt': self.list,
            'installed-files.txt': self.list,
            'entry_points.txt': self.config,
        }
        self.data = data
        self.name = name
        if not self.name:
            raise TypeError('Invalid file name: {0}'.format(self.name))

    def parse(self):
        """Parse the data with the strategy matching the file name.

        Returns {} for unknown file names and for INI-style data without a
        section header.
        """
        try:
            return self.MAP[self.name]()
        except (KeyError, ConfigParser.MissingSectionHeaderError):
            return {}

    def pkg_info(self):
        """Parse RFC 822-style PKG-INFO data into a header -> value dict."""
        d = {}
        f = FeedParser()
        f.feed(self.data)
        d.update(list(f.close().items()))
        return d

    def list(self, add_sections=False):
        """Return the non-empty lines; '[section]' headers are skipped unless
        ``add_sections`` is true."""
        d = []
        for line in self.data.splitlines():
            line = line.strip()
            if not line or (not add_sections and (line.startswith('[') and line.endswith(']'))):
                continue
            d.append(line)
        return d

    def req(self):
        """Parse requires.txt into {'install': set(), 'extras': {name: set()}}."""
        def is_section(s):
            # A section is a non-empty name between square brackets.
            return (s[0], s[-1]) == ('[', ']') and s[1:-1]

        reqs = {
            'install': set(),
            'extras': collections.defaultdict(set)
        }
        cursect = None
        for r in self.list(True):
            s = is_section(r)
            if s:
                reqs['extras'][s] = set()
                cursect = s
            elif cursect:
                reqs['extras'][cursect].add(r)
            else:
                # Requirements before any section are plain install requires.
                reqs['install'].add(r)
        reqs['extras'] = dict(reqs['extras'])
        return reqs

    def config(self):
        """Parse INI-style data (e.g. entry_points.txt) into a nested dict."""
        d = {}
        p = ConfigParser.ConfigParser()
        try:
            # BUG FIX: ConfigParser.readfp() was deprecated since Python 3.2
            # and removed in 3.12; prefer read_string() when available.
            p.read_string(self.data)
        except AttributeError:
            # Python 2 fallback: its ConfigParser has no read_string().
            p.readfp(StringIO.StringIO(self.data))
        for s in p.sections():
            d[s] = dict(p.items(s))
        return d
class Dist(object):
    '''
    This is the base class for all other objects. It requires a list of
    tuples (``(file_data, file_name)``) and provides some attributes/methods:

    .. attribute:: name

        The package's name (PKG-INFO ``Name``, falling back to ``_name``).

    .. attribute:: version

        The package's current version (PKG-INFO ``Version``, falling back to
        ``_version``).

    .. attribute:: as_req

        The string that represents the parsed requirement (``name==version``).

    .. attribute:: requires

        This distribution's requirements, parsed from :file:`requires.txt`.

    .. attribute:: location

        The distribution's metadata location (set by subclasses).

    .. attribute:: files

        All the metadata file names parsed by this distribution.

    .. attribute:: has_metadata

        This attribute is True when the distribution has some metadata,
        False otherwise.

    .. attribute:: pkg_info

        Returns PKG-INFO's data. Equivalent to ``Dist.file('PKG-INFO')``.

    .. attribute:: zip_safe

        False whether the package has a :file:`not-zip-safe` file, True
        otherwise.

    .. automethod:: file

    .. automethod:: entry_points_map
    '''

    ## Used by __repr__ method
    _arg_name = None
    # Assume zip-safe until a not-zip-safe marker file is seen.
    _zip_safe = True
    # Fallbacks when PKG-INFO lacks Name/Version; subclasses may set them.
    _name = _version = None

    def __init__(self, file_objects):
        self.metadata = {}
        self.file_objects = file_objects
        self._get_metadata()

    def __repr__(self):
        ## A little trick to get the real name from sub-classes (like Egg or SDist)
        return '<{0}[{1}] object at {2}>'.format(self.__class__.__name__, self._arg_name, id(self))

    def _get_metadata(self):
        """Parse every recognized metadata file into self.metadata."""
        for data, name in self.file_objects:
            if name == 'not-zip-safe':
                self._zip_safe = False
            elif name.endswith('.txt') or name == 'PKG-INFO':
                metadata = MetadataFileParser(data, name).parse()
                #if not metadata:
                #    continue
                self.metadata[name] = metadata
        return self.metadata

    @property
    def has_metadata(self):
        return bool(self.metadata)

    @property
    def pkg_info(self):
        return self.file('PKG-INFO')

    @property
    def name(self):
        # Prefer PKG-INFO's Name; re-raise KeyError when no fallback exists.
        try:
            return self.pkg_info['Name']
        except KeyError:
            if self._name is None:
                raise
            return self._name

    @property
    def version(self):
        # Prefer PKG-INFO's Version; re-raise KeyError when no fallback exists.
        try:
            return self.pkg_info['Version']
        except KeyError:
            if self._version is None:
                raise
            return self._version

    @property
    def as_req(self):
        return '{0}=={1}'.format(self.name, self.version)

    @property
    def requires(self):
        return self.file('requires.txt')

    @property
    def zip_safe(self):
        return self._zip_safe

    @property
    def files(self):
        return list(self.metadata.keys())

    def file(self, name):
        '''
        Returns the content of the specified file. Raises :exc:`KeyError`
        when the distribution does not have such file.
        '''
        if name not in self.metadata:
            raise KeyError('This package does not have {0} file'.format(name))
        return self.metadata[name]

    def entry_points_map(self, group):
        '''
        Returns the elements under the specified section in the
        :file:`entry_points.txt` file ({} when absent).
        '''
        try:
            return self.file('entry_points.txt')[group]
        except KeyError:
            return {}
class Egg(Dist):
    '''Dist implementation backed by a zipped ``.egg`` archive::

        >>> e = Egg('pyg-0.4-py2.7.egg')
        >>> e.name, e.version
        ('pyg', '0.4')
        >>> e.as_req
        'pyg==0.4'
        >>> e.entry_points_map('console_scripts')
        {'pyg': 'pyg:main'}
    '''

    def __init__(self, egg_path):
        """Read the metadata files from the archive's EGG-INFO directory."""
        self.location = self._arg_name = os.path.abspath(egg_path)
        archive = zipfile.ZipFile(egg_path)
        super(Egg, self).__init__(zip_files(archive, 'EGG-INFO'))
class SDist(Dist):
    '''Dist implementation backed by a source distribution archive
    (``.zip``, ``.tar``, ``.tar.gz``, ``.tar.bz2``, ...)::

        >>> s = SDist('pyg-0.4.tar.gz')
        >>> s.pkg_info['Metadata-Version']
        '1.1'
        >>> s.as_req
        'pyg==0.4'

    :raises ValueError: if the archive format is not supported
    '''

    def __init__(self, sdist_path):
        e = ext(sdist_path)
        if e == '.zip':
            arch = zipfile.ZipFile(sdist_path)
            func = zip_files
        elif e.startswith('.tar'):
            # '.tar' opens uncompressed; '.tar.gz'/'.tar.bz2' select the codec.
            mode = 'r' if e == '.tar' else 'r:' + e.split('.')[2]
            arch = tarfile.open(sdist_path, mode=mode)
            func = tar_files
        else:
            # BUG FIX: previously an unrecognized extension fell through and
            # crashed below with NameError on 'arch'; fail explicitly instead.
            raise ValueError('Unsupported archive format: {0}'.format(sdist_path))
        self.location = self._arg_name = os.path.abspath(sdist_path)
        super(SDist, self).__init__(func(arch))
class Dir(Dist):
    '''Dist implementation reading metadata files directly from a directory
    (e.g. a ``*.egg-info`` directory under site/dist-packages)::

        >>> d = Dir('/usr/local/lib/python2.7/dist-packages/pypol_-0.5.egg-info')
        >>> d.as_req
        'pypol-==0.5'
    '''

    def __init__(self, path):
        """Collect every regular file in *path*; PKG-INFO must be present."""
        if not os.path.exists(os.path.join(path, 'PKG-INFO')):
            raise ValueError('This directory does not contain metadata files')
        collected = []
        for entry in os.listdir(path):
            full_path = os.path.join(path, entry)
            if not os.path.isfile(full_path):
                continue
            with open(full_path) as fobj:
                collected.append((fobj.read(), entry))
        self._arg_name = os.path.normpath(path)
        self.location = os.path.abspath(path)
        super(Dir, self).__init__(collected)
class EggDir(Dir):
    '''Dist implementation for an unpacked egg directory that contains an
    EGG-INFO sub-directory::

        >>> ed = EggDir('/usr/local/lib/python2.7/dist-packages/pkgtools-0.3.1-py2.7.egg')
        >>> ed.as_req
        'pkgtools==0.3.1'
        >>> ed.zip_safe
        False
    '''

    def __init__(self, path):
        """Delegate to Dir on the EGG-INFO sub-directory of *path*."""
        egg_info = os.path.join(path, 'EGG-INFO')
        if not os.path.exists(egg_info):
            raise ValueError('Path does not exist: {0}'.format(egg_info))
        super(EggDir, self).__init__(egg_info)
class Develop(Dir):
    '''Dist implementation for a package installed in development mode.
    Accepts either a module object or an importable package name and locates
    the ``<package>.egg-info`` directory next to (or one level above) the
    package source::

        >>> d = Develop('pkgtools')
        >>> d
        <Develop[pkgtools] object at 158833324>
    '''

    def __init__(self, package):
        if isinstance(package, str):
            try:
                package = __import__(package)
            except ImportError:
                raise ValueError('cannot import {0}'.format(package))
        # Keep the explicit None check: __package__ may legitimately be ''.
        package_name = package.__package__
        if package_name is None:
            package_name = package.__name__
        base_dir = os.path.dirname(package.__file__)
        egg_info = package_name + '.egg-info'
        # The .egg-info directory lives either next to the package or one
        # level up (typical for 'setup.py develop' checkouts).
        candidates = (os.path.join(base_dir, egg_info),
                      os.path.join(base_dir, '..', egg_info))
        for candidate in candidates:
            if os.path.exists(candidate):
                metadata_path = candidate
                break
        else:
            raise ValueError('Cannot find metadata for {0}'.format(package_name))
        super(Develop, self).__init__(metadata_path)
class Installed(Dir):
    '''
    Metadata directory for an installed distribution.

    Accepts either an importable package name or a module object and locates
    the corresponding ``<name>-*.egg-info`` or ``EGG-INFO`` directory (the one
    containing a PKG-INFO file), returning a Dist-like object.

    .. automethod:: installed_files
    '''
    def __init__(self, package):
        if isinstance(package, str):
            name = package
            try:
                package = __import__(name)
            except (ImportError, SystemExit):
                # Some distributions register under a lower-cased module name.
                try:
                    package = __import__(name.lower())
                except (ImportError, SystemExit):
                    raise ValueError('cannot import {0}'.format(name))
        package_name = package.__package__
        if package_name is None:
            package_name = package.__name__
        # Build glob patterns for both the exact and the capitalized name.
        patterns = []
        for base_pattern in ('{0}-*.egg-info', 'EGG-INFO'):
            for variant in (package_name, package_name.capitalize()):
                patterns.append(base_pattern.format(variant))
        pkg_dir = os.path.dirname(package.__file__)
        search_dirs = (pkg_dir, os.path.join(pkg_dir, '..'))
        candidates = []
        for pattern in patterns:
            for root in search_dirs:
                candidates.extend(glob.glob(os.path.join(root, pattern)))
        # Keep the first candidate that actually contains a PKG-INFO file.
        for candidate in candidates:
            if os.path.exists(os.path.join(candidate, 'PKG-INFO')):
                path = candidate
                break
        else:
            raise ValueError('cannot find PKG-INFO for {0}'.format(package_name))
        self.package_name = package_name
        super(Installed, self).__init__(path)

    def installed_files(self):
        '''
        .. versionadded:: 0.7

        Return the files installed by this distribution as a dict with keys:

        * lib: files placed into the :file:`site/dist-packages` directory.
        * bin: Python executable file names.
        '''
        locations = {
            'lib': set([self.location]),
            'bin': set(),
            # 'binpath' / 'data' intentionally omitted: too dangerous
        }
        patterns = [self.package_name]
        for suffix in ('py', 'pyc', 'pyo', 'so', 'egg'):
            patterns.append('%s*.%s' % (self.package_name, suffix))
        parent = os.path.dirname(self.location)
        for pattern in patterns:
            locations['lib'].update(glob.iglob(os.path.join(parent, pattern)))
        for group in ('console_scripts', 'gui_scripts'):
            try:
                locations['bin'].update(self.entry_points_map(group).keys())
            except KeyError:
                continue
        # Scanning $PATH for the executables was removed: too dangerous.
        return locations
class WorkingSet(object):
    """Index of importable top-level packages that ship metadata files."""

    def __init__(self, entries=None, onerror=None, debug=None):
        self.packages = {}
        self.onerror = onerror or (lambda arg: None)
        self.debug = debug or (lambda arg: None)
        self._find_packages()

    def _find_packages(self):
        """Walk all importable packages and index those with metadata."""
        for loader, pkg_name, _ispkg in pkgutil.walk_packages(onerror=self.onerror):
            if len(pkg_name.split('.')) > 1:
                self.debug('Not a top-level package: {0}'.format(pkg_name))
                continue
            location = loader.find_module(pkg_name).filename
            if ext(location) in ('.py', '.pyc', '.so'):
                self.debug('Not a package: {0}'.format(pkg_name))
                continue
            ## We want to keep only packages with metadata-files
            try:
                meta = Installed(pkg_name)
            except Exception as exc:
                self.debug('Error on retrieving metadata from {0}: {1}'.format(pkg_name, exc))
                continue
            self.packages[meta.name] = (location, meta)

    def get(self, package_name, default=None):
        return self.packages.get(package_name, default)

    def __contains__(self, item):
        return item in self.packages

    def __iter__(self):
        return iter(list(self.packages.items()))

    def __bool__(self):
        return bool(self.packages)

    def __len__(self):
        return len(self.packages)
def get_metadata(pkg):
    """Return the appropriate Dist-like metadata object for *pkg*.

    *pkg* may be a module object, an importable package name, or a
    filesystem path to a directory, egg, or source distribution.
    """
    import types
    if type(pkg) is types.ModuleType:
        return Installed(pkg)
    if isinstance(pkg, str):
        try:
            m = __import__(pkg)  # NOTE(review): result unused; import acts only as a probe
        except ImportError:
            pass
        else:
            try:
                return Installed(pkg)
            except ValueError:
                # Importable but without installed metadata: try develop-mode.
                return Develop(pkg)
    if os.path.exists(pkg):
        e = ext(pkg)
        if os.path.isdir(pkg):
            if e == '.egg':
                return EggDir(pkg)
            return Dir(pkg)
        if e in ('.tar', '.tar.gz', '.tar.bz2', '.zip'):
            return SDist(pkg)
        elif e == '.egg':
            return Egg(pkg)
raise TypeError('Cannot return a Dist object') | {
"content_hash": "48df49f7757349acc84d7a0894e0c873",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 145,
"avg_line_length": 31.9402460456942,
"alnum_prop": 0.5360404974138879,
"repo_name": "rubik/pkgtools",
"id": "9c333ac8e48ba1c3f78528e2e85a6cd48b1def03",
"size": "18174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pkgtools/pkg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39844"
},
{
"name": "Shell",
"bytes": "4515"
}
],
"symlink_target": ""
} |
from django import template
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
register = template.Library()
@register.tag
def order_menu(parser, token):
    """Template tag: ``{% order_menu request ordering %}`` renders the ordering menu."""
    bits = token.split_contents()
    try:
        tag, request, ordering = bits
    except ValueError:
        # split_contents() already succeeded above, so re-using its result is safe;
        # message grammar fixed ('2 argument' -> '2 arguments').
        raise template.TemplateSyntaxError('%s tag requires 2 arguments' % bits[0])
    return OrderMenuNode(request, ordering)
# (translated label, query-string value) pairs for the supported orderings.
# NOTE(review): `_` (lazy translation) is used here but is not imported in the
# visible import block — confirm it comes from django.utils.translation.
ORDERINGS = (
    (_('Last updated'), 'newest'),
    (_('Name'), 'name'),
)
def get_url(request, ordering, new_ordering):
class OrderMenuNode(template.Node):
def __init__(self, request, ordering):
    """Store template variables for the request and the current ordering.

    Bug fix: ``ordering`` was referenced in the body but missing from the
    signature, while the ``order_menu`` tag constructs
    ``OrderMenuNode(request, ordering)`` with two arguments.
    """
    self.request = template.Variable(request)
    self.ordering = template.Variable(ordering)
def render(self, context):
markup = [u'<ul>']
request = self.request.resolve(context)
ordering = self.ordering.resolve(context)
for ord in ORDERINGS:
if ord[1] == ordering:
markup.append(u'<li><a class="current">%s</a></li>' % ugettext(ord[0]))
else:
markup.append(u'<li><a href="%s">%s</a></li>' % (get_url(request, ord[1]), ugettext(ord[0])))
| {
"content_hash": "98f692fe1324e41ff58aa268a0c4900d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 110,
"avg_line_length": 31.085714285714285,
"alnum_prop": 0.6038602941176471,
"repo_name": "fiam/oauthsp",
"id": "f3c4c62b6ae36bac3cdf401ec008380b4842a3ae",
"size": "2231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templatetags/order_menu.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
'''
Copyright 2012 STFC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Konrad Jopek
'''
from datetime_utils import valid_from,valid_until, parse_timestamp, parse_time, iso2seconds
from exceptions import install_exc_handler, default_handler
from parsing_utils import parse_fqan
from hashing import calculate_hash
import logging
import sys
LOG_BREAK = '========================================'
def set_up_logging(logfile, level, console):
    """Configure the root logger.

    Args:
        logfile: path of a file to log to, or None for no file handler.
        level: one of "DEBUG", "INFO", "WARN", "ERROR", "CRITICAL".
        console: if true, also log to stdout.
    """
    level_by_name = {name: getattr(logging, name)
                     for name in ("DEBUG", "INFO", "WARN", "ERROR", "CRITICAL")}
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    root = logging.getLogger()
    root.setLevel(level_by_name[level])
    handlers = []
    if logfile is not None:
        handlers.append(logging.FileHandler(logfile))
    if console:
        handlers.append(logging.StreamHandler(sys.stdout))
    for handler in handlers:
        handler.setFormatter(formatter)
        root.addHandler(handler)
| {
"content_hash": "5715c157522a57f88756b6ecd8560aaf",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 91,
"avg_line_length": 31.03921568627451,
"alnum_prop": 0.6689829437776375,
"repo_name": "stfc/apel",
"id": "7af15956121ed5f84f372efabfba6f544a4b1f57",
"size": "1583",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apel/common/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "466479"
},
{
"name": "Shell",
"bytes": "4089"
},
{
"name": "TSQL",
"bytes": "23113"
}
],
"symlink_target": ""
} |
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
from mozurestsdk.apicontext import ApiContext;
class Subscription(object):
    """Resource client for Mozu push-event subscriptions."""

    def __init__(self, apiContext: ApiContext = None, mozuClient = None):
        """Bind a Mozu client (the default one if none given) and attach an API context."""
        self.client = mozuClient or default_client()
        context = apiContext if apiContext is not None else ApiContext()
        self.client.withApiContext(context)

    def getSubscriptions(self, startIndex = None, pageSize = None, sortBy = None, filter = None, responseFields = None):
        """Retrieve a filtered, sorted, paged list of push-event subscriptions.

        Args:
            startIndex (int): zero-based offset of the first returned entity.
            pageSize (int): results per page (default 20, max 200).
            sortBy (string): element to sort by plus direction (a-z or z-a).
            filter (string): filter expression, e.g. "filter=IsDisplayed+eq+true".
            responseFields (string): extra fields to include in the response.

        Returns:
            SubscriptionCollection

        Raises:
            ApiException
        """
        url = MozuUrl("/api/event/push/subscriptions/?startIndex={startIndex}&pageSize={pageSize}&sortBy={sortBy}&filter={filter}&responseFields={responseFields}", "GET", UrlLocation.TenantPod, False)
        for param, value in (("filter", filter),
                             ("pageSize", pageSize),
                             ("responseFields", responseFields),
                             ("sortBy", sortBy),
                             ("startIndex", startIndex)):
            url.formatUrl(param, value)
        self.client.withResourceUrl(url).execute()
        return self.client.result()
| {
"content_hash": "edf9009f29432ca25667d5f366721980",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 394,
"avg_line_length": 50.76470588235294,
"alnum_prop": 0.7427578215527231,
"repo_name": "sanjaymandadi/mozu-python-sdk",
"id": "ffa113dfc2ec1f8971055342f21fa8b2db3ead67",
"size": "2590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mozurestsdk/event/push/subscription.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "649189"
}
],
"symlink_target": ""
} |
from tsa.models import Endpoint, create_session
from tsa import logging
logger = logging.getLogger(__name__)
def read(limit=None):
'''
Yields Endpoints (but only those with content)
'''
DBSession = create_session()
endpoints_with_content = DBSession.query(Endpoint).\
filter(Endpoint.status_code == 200).\
filter(Endpoint.content is not None).\
filter(Endpoint.content != '').\
order_by(Endpoint.id)
total = endpoints_with_content.count()
if limit:
logger.info('Reading %d out of a total %d endpoints with content', limit, total)
for endpoint in endpoints_with_content.limit(limit):
yield endpoint
| {
"content_hash": "3e342049164399e198dd8a2b5661ba39",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 88,
"avg_line_length": 31,
"alnum_prop": 0.6627565982404692,
"repo_name": "chbrown/tsa",
"id": "ec09bd8055e80b7dc0e757dce7473083ebb95edb",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tsa/data/sb5b/links.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7793"
},
{
"name": "JavaScript",
"bytes": "102405"
},
{
"name": "Jupyter Notebook",
"bytes": "424806"
},
{
"name": "Python",
"bytes": "157423"
}
],
"symlink_target": ""
} |
"""
A complete binary tree is a binary tree in which every level, except possibly
the last, is completely filled, and all nodes are as far left as possible.
Write a data structure CBTInserter that is initialized with a complete binary
tree and supports the following operations:
CBTInserter(TreeNode root) initializes the data structure on a given tree with
head node root;
CBTInserter.insert(int v) will insert a TreeNode into the tree with value
node.val = v so that the tree remains complete, and returns the value of the
parent of the inserted TreeNode;
CBTInserter.get_root() will return the head node of the tree.
Example 1:
Input: inputs = ["CBTInserter","insert","get_root"], inputs = [[[1]],[2],[]]
Output: [null,1,[1,2]]
Example 2:
Input: inputs = ["CBTInserter","insert","insert","get_root"], inputs =
[[[1,2,3,4,5,6]],[7],[8],[]]
Output: [null,3,4,[1,2,3,4,5,6,7,8]]
Note:
The initial given tree is complete and contains between 1 and 1000 nodes.
CBTInserter.insert is called at most 10000 times per test case.
Every value of a given or inserted node is between 0 and 5000.
"""
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


from collections import deque


class CBTInserter:
    """O(1)-per-insert complete-binary-tree inserter.

    A FIFO of nodes that still have a free child slot is built by a
    level-order walk at construction time; each insert attaches the new
    leaf to the front candidate and enqueues the leaf itself.
    """

    def __init__(self, root: TreeNode):
        self.root = root
        self.candidates = deque()
        frontier = [root]
        while frontier:
            next_level = []
            for node in frontier:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
                if not (node.left and node.right):
                    # Node still has room, so it is an insertion candidate.
                    self.candidates.append(node)
            frontier = next_level

    def insert(self, v: int) -> int:
        parent = self.candidates[0]
        child = TreeNode(v)
        if parent.left:
            parent.right = child
        else:
            parent.left = child
        if parent.left and parent.right:
            # Parent is now full; it can no longer accept children.
            self.candidates.popleft()
        self.candidates.append(child)
        return parent.val

    def get_root(self) -> TreeNode:
        return self.root
# Your CBTInserter object will be instantiated and called as such:
# obj = CBTInserter(root)
# param_1 = obj.insert(v)
# param_2 = obj.get_root()
| {
"content_hash": "72a7eb6d86f41d47aa8939237ebc7454",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 30.02247191011236,
"alnum_prop": 0.6216317365269461,
"repo_name": "algorhythms/LeetCode",
"id": "2c4ec26fca2c49952afeaa6f122dcce05c47b93b",
"size": "2691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "919 Complete Binary Tree Inserter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1444167"
}
],
"symlink_target": ""
} |
import nltk
import re
import os
import time
p = nltk.PorterStemmer()
b = re.compile(r"\b\w+\b")
# Bug fix: the file handle was opened without ever being closed; a context
# manager releases it as soon as the corpus has been tokenized.
with open("./text/de-bello-gallico.txt") as corpus:
    tokens = b.findall(corpus.read().lower())
def test(tokens):
    """Stem every token ten times over (rough benchmark loop)."""
    print("call...")
    for pass_num in range(10):
        print("loop...", pass_num)
        [p.stem(tok) for tok in tokens]
# Time one full benchmark run and report the elapsed wall-clock seconds.
t0 = time.time()
test(tokens)
elapsed = time.time() - t0
print(elapsed)
| {
"content_hash": "bfbbd4b062ff647600f0384f68318c1a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 17.80952380952381,
"alnum_prop": 0.606951871657754,
"repo_name": "rharriso/nltk-workspace",
"id": "e2c2645464cb11400b74bb3849acada08f52f7bf",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "porter-stemmer-bench.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13496"
}
],
"symlink_target": ""
} |
"""The Apple TV integration."""
import asyncio
import logging
from random import randrange
from pyatv import connect, exceptions, scan
from pyatv.const import Protocol
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.remote import DOMAIN as REMOTE_DOMAIN
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import (
ATTR_CONNECTIONS,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
ATTR_SUGGESTED_AREA,
ATTR_SW_VERSION,
CONF_ADDRESS,
CONF_NAME,
CONF_PROTOCOL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import CONF_CREDENTIALS, CONF_IDENTIFIER, CONF_START_OFF, DOMAIN
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Apple TV"
BACKOFF_TIME_UPPER_LIMIT = 300 # Five minutes
NOTIFICATION_TITLE = "Apple TV Notification"
NOTIFICATION_ID = "apple_tv_notification"
SIGNAL_CONNECTED = "apple_tv_connected"
SIGNAL_DISCONNECTED = "apple_tv_disconnected"
PLATFORMS = [MP_DOMAIN, REMOTE_DOMAIN]
async def async_setup_entry(hass, entry):
    """Set up a config entry for Apple TV."""
    manager = AppleTVManager(hass, entry)
    # One manager per config entry, keyed by unique id so platforms can find it.
    hass.data.setdefault(DOMAIN, {})[entry.unique_id] = manager

    async def on_hass_stop(event):
        """Stop push updates when hass stops."""
        await manager.disconnect()

    entry.async_on_unload(
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_hass_stop)
    )

    async def setup_platforms():
        """Set up platforms and initiate connection."""
        # Forward setup to all platforms concurrently, then connect once the
        # entities exist so they receive the connected signal.
        await asyncio.gather(
            *(
                hass.config_entries.async_forward_entry_setup(entry, platform)
                for platform in PLATFORMS
            )
        )
        await manager.init()

    hass.async_create_task(setup_platforms())
    return True
async def async_unload_entry(hass, entry):
    """Unload an Apple TV config entry."""
    unloaded = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unloaded:
        # Remove the manager for this entry and tear down its connection.
        await hass.data[DOMAIN].pop(entry.unique_id).disconnect()
    return unloaded
class AppleTVEntity(Entity):
    """Base entity for a device that sends commands to an Apple TV."""

    _attr_should_poll = False

    def __init__(self, name, identifier, manager):
        """Initialize the entity."""
        self.atv = None
        self.manager = manager
        self._attr_name = name
        self._attr_unique_id = identifier
        self._attr_device_info = DeviceInfo(identifiers={(DOMAIN, identifier)})

    async def async_added_to_hass(self):
        """Subscribe to connect/disconnect signals once added to hass."""

        @callback
        def _on_connect(atv):
            """Store the new connection and notify the subclass."""
            self.atv = atv
            self.async_device_connected(atv)
            self.async_write_ha_state()

        @callback
        def _on_disconnect():
            """Drop the connection and notify the subclass."""
            self.async_device_disconnected()
            self.atv = None
            self.async_write_ha_state()

        for signal, handler in (
            (SIGNAL_CONNECTED, _on_connect),
            (SIGNAL_DISCONNECTED, _on_disconnect),
        ):
            self.async_on_remove(
                async_dispatcher_connect(
                    self.hass, f"{signal}_{self.unique_id}", handler
                )
            )

    def async_device_connected(self, atv):
        """Handle when connection is made to device."""

    def async_device_disconnected(self):
        """Handle when connection was lost to device."""
class AppleTVManager:
    """Connection and power manager for an Apple TV.

    An instance is used per device to share the same power state between
    several platforms. It also manages scanning and connection establishment
    in case of problems.
    """

    def __init__(self, hass, config_entry):
        """Initialize power manager."""
        self.config_entry = config_entry
        self.hass = hass
        self.atv = None  # active pyatv connection; None while disconnected
        # The "start off" option means do not connect automatically on setup.
        self._is_on = not config_entry.options.get(CONF_START_OFF, False)
        self._connection_attempts = 0
        self._connection_was_lost = False
        self._task = None  # background connect-loop task (None when idle)

    async def init(self):
        """Initialize power management."""
        if self._is_on:
            await self.connect()

    def connection_lost(self, _):
        """Device was unexpectedly disconnected.

        This is a callback function from pyatv.interface.DeviceListener.
        """
        _LOGGER.warning(
            'Connection lost to Apple TV "%s"', self.config_entry.data[CONF_NAME]
        )
        self._connection_was_lost = True
        self._handle_disconnect()

    def connection_closed(self):
        """Device connection was (intentionally) closed.

        This is a callback function from pyatv.interface.DeviceListener.
        """
        self._handle_disconnect()

    def _handle_disconnect(self):
        """Handle that the device disconnected and restart connect loop."""
        if self.atv:
            self.atv.listener = None
            self.atv.close()
            self.atv = None
        self._dispatch_send(SIGNAL_DISCONNECTED)
        self._start_connect_loop()

    async def connect(self):
        """Connect to device."""
        self._is_on = True
        self._start_connect_loop()

    async def disconnect(self):
        """Disconnect from device."""
        _LOGGER.debug("Disconnecting from device")
        self._is_on = False
        try:
            if self.atv:
                # Stop push updates first so no callbacks fire mid-teardown.
                self.atv.push_updater.listener = None
                self.atv.push_updater.stop()
                self.atv.close()
                self.atv = None
            if self._task:
                self._task.cancel()
                self._task = None
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("An error occurred while disconnecting")

    def _start_connect_loop(self):
        """Start background connect loop to device."""
        # Only one loop at a time, and only while the user wants us connected.
        if not self._task and self.atv is None and self._is_on:
            self._task = asyncio.create_task(self._connect_loop())
        else:
            _LOGGER.debug(
                "Not starting connect loop (%s, %s)", self.atv is None, self._is_on
            )

    async def _connect_loop(self):
        """Connect loop background task function."""
        _LOGGER.debug("Starting connect loop")
        # Try to find device and connect as long as the user has said that
        # we are allowed to connect and we are not already connected.
        while self._is_on and self.atv is None:
            try:
                conf = await self._scan()
                if conf:
                    await self._connect(conf)
            except exceptions.AuthenticationError:
                # Pairing/credentials problem: needs user intervention, stop looping.
                self._auth_problem()
                break
            except asyncio.CancelledError:
                pass
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Failed to connect")
                self.atv = None
            if self.atv is None:
                self._connection_attempts += 1
                # Randomized exponential backoff, capped at five minutes.
                backoff = min(
                    randrange(2 ** self._connection_attempts), BACKOFF_TIME_UPPER_LIMIT
                )
                _LOGGER.debug("Reconnecting in %d seconds", backoff)
                await asyncio.sleep(backoff)
        _LOGGER.debug("Connect loop ended")
        self._task = None

    def _auth_problem(self):
        """Problem to authenticate occurred that needs intervention."""
        _LOGGER.debug("Authentication error, reconfigure integration")
        name = self.config_entry.data[CONF_NAME]
        identifier = self.config_entry.unique_id
        self.hass.components.persistent_notification.create(
            "An irrecoverable connection problem occurred when connecting to "
            f"`{name}`. Please go to the Integrations page and reconfigure it",
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID,
        )
        # Add to event queue as this function is called from a task being
        # cancelled from disconnect
        asyncio.create_task(self.disconnect())
        # Start a reauth flow so the user can re-pair the device.
        self.hass.async_create_task(
            self.hass.config_entries.flow.async_init(
                DOMAIN,
                context={"source": SOURCE_REAUTH},
                data={CONF_NAME: name, CONF_IDENTIFIER: identifier},
            )
        )

    async def _scan(self):
        """Try to find device by scanning for it."""
        identifier = self.config_entry.unique_id
        address = self.config_entry.data[CONF_ADDRESS]
        protocol = Protocol(self.config_entry.data[CONF_PROTOCOL])
        _LOGGER.debug("Discovering device %s", identifier)
        # Fast path: targeted scan of the cached address.
        atvs = await scan(
            self.hass.loop, identifier=identifier, protocol=protocol, hosts=[address]
        )
        if atvs:
            return atvs[0]
        _LOGGER.debug(
            "Failed to find device %s with address %s, trying to scan",
            identifier,
            address,
        )
        # Fall back to a full network scan (the address may have changed).
        atvs = await scan(self.hass.loop, identifier=identifier, protocol=protocol)
        if atvs:
            return atvs[0]
        _LOGGER.debug("Failed to find device %s, trying later", identifier)
        return None

    async def _connect(self, conf):
        """Connect to device."""
        credentials = self.config_entry.data[CONF_CREDENTIALS]
        session = async_get_clientsession(self.hass)
        # Credentials are stored keyed by the numeric protocol value.
        for protocol, creds in credentials.items():
            conf.set_credentials(Protocol(int(protocol)), creds)
        _LOGGER.debug("Connecting to device %s", self.config_entry.data[CONF_NAME])
        self.atv = await connect(conf, self.hass.loop, session=session)
        self.atv.listener = self
        self._dispatch_send(SIGNAL_CONNECTED, self.atv)
        self._address_updated(str(conf.address))
        self._async_setup_device_registry()
        self._connection_attempts = 0
        if self._connection_was_lost:
            _LOGGER.info(
                'Connection was re-established to Apple TV "%s"',
                self.config_entry.data[CONF_NAME],
            )
            self._connection_was_lost = False

    @callback
    def _async_setup_device_registry(self):
        # Register (or update) this device in the HA device registry.
        attrs = {
            ATTR_IDENTIFIERS: {(DOMAIN, self.config_entry.unique_id)},
            ATTR_MANUFACTURER: "Apple",
            ATTR_NAME: self.config_entry.data[CONF_NAME],
        }
        # Suggest an area from the configured name, minus a trailing "Apple TV".
        area = attrs[ATTR_NAME]
        name_trailer = f" {DEFAULT_NAME}"
        if area.endswith(name_trailer):
            area = area[: -len(name_trailer)]
        attrs[ATTR_SUGGESTED_AREA] = area
        if self.atv:
            dev_info = self.atv.device_info
            attrs[ATTR_MODEL] = (
                DEFAULT_NAME + " " + dev_info.model.name.replace("Gen", "")
            )
            attrs[ATTR_SW_VERSION] = dev_info.version
            if dev_info.mac:
                attrs[ATTR_CONNECTIONS] = {(dr.CONNECTION_NETWORK_MAC, dev_info.mac)}
        device_registry = dr.async_get(self.hass)
        device_registry.async_get_or_create(
            config_entry_id=self.config_entry.entry_id, **attrs
        )

    @property
    def is_connecting(self):
        """Return true if connection is in progress."""
        return self._task is not None

    def _address_updated(self, address):
        """Update cached address in config entry."""
        _LOGGER.debug("Changing address to %s", address)
        self.hass.config_entries.async_update_entry(
            self.config_entry, data={**self.config_entry.data, CONF_ADDRESS: address}
        )

    def _dispatch_send(self, signal, *args):
        """Dispatch a signal to all entities managed by this manager."""
        async_dispatcher_send(
            self.hass, f"{signal}_{self.config_entry.unique_id}", *args
        )
| {
"content_hash": "1091a64fd5ac2a0f8a7badde9dd22a14",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 87,
"avg_line_length": 32.90691489361702,
"alnum_prop": 0.6036531156550554,
"repo_name": "jawilson/home-assistant",
"id": "b710a753da938668ca631287d1c3bfd5cec339d9",
"size": "12373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/apple_tv/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import pandas as pd
from variant_annotations import process_variant_annotations, mp_variant_annotations
from vcf_metadata import VCFMetadata
class VCF(object):
"""Loads in a vcf file, aware of gzipped files.
Parameters
--------------------------------------
filename: str, required
path to vcf file
sample_id: str or list, default='all'
specifies the sample column ids to read and parse
'all' means all sample columns
can use a str (e.g. 'NA12878')
or
can use a list (e.g. ['NA12878', 'NA12877']
cols: list, default ['#CHROM', 'POS', 'REF', 'ALT', 'FORMAT']
specifies the VCF column names, EXCEPT SAMPLE COLS, to read and parse
Must include ['#CHROM', 'POS', 'REF', 'ALT', 'FORMAT']
Additional columns such as QUAL, FILTER, INFO will be accepted
e.g. ['#CHROM', 'POS', 'REF', 'ALT', 'FORMAT', 'INFO', 'QUAL']
chunksize: int, default=5000
specifies the number of VCF lines to read and parse in 1 chunk
Note using a large chunksize with large n_cores requires LOTS OF RAM
requires ~40 seconds to parse 1000 rows with 2500 samples
Methods
-----------------------------------------
get_vcf_df_chunk
returns VCF pandasDF with chunksize
add_variant_annotations
Annotates each variant
See docstring for details
Returns VCF Obj with following attributes
-----------------------------------------
header_df: pandas df
VCF header as a pandas df
samples: list
sample column IDs
all_columns: list
all sample column IDs in VCF
vcf_chunks: pandas.io.parsers.TextFileReader chunk
VCF chunk
Access to chunk provided by get_vcf_df_chunk()
df: pandas DF
Index: CHROM, POS, REF, ALT
Columns: CHROM, POS, REF, ALT, SAMPLE(S) +/- {QUAL, FILTER, INFO if specified}
"""
def __init__(self, filename, sample_id='all',
             cols=('#CHROM', 'POS', 'REF', 'ALT', 'FORMAT'),
             chunksize=5000):
    """Open *filename*, parse its header, and prepare chunked reading.

    Parameters:
        filename: path to the (optionally gzipped) VCF file.
        sample_id: 'all', a single sample ID string, or a list of sample IDs.
        cols: VCF columns to read; must include
            '#CHROM', 'POS', 'REF', 'ALT' and 'FORMAT'.
        chunksize: number of VCF lines to read and parse per chunk.
    """
    # Parse the raw header into a key/value dataframe.
    header_parsed = VCFMetadata(filename)
    self.header_df = self.get_header_df(header_parsed.header)
    # Resolve which sample columns to load.
    self.samples = list(self.header_df.loc['SampleIDs'])[0]
    self.sample_id = self.get_sample_ids(sample_id)
    # Bug fix: the default for `cols` used to be a mutable list (shared across
    # all calls); it is now an immutable tuple, converted to a list per call.
    self.set_cols(list(cols))
    self.set_dtypes()
    # Open a pandas TextFileReader yielding `chunksize`-line chunks.
    self.chunksize = chunksize
    self.vcf_chunks = pd.read_csv(filename, sep="\t",
                                  compression=header_parsed.compression,
                                  skiprows=(len(self.header_df) - 2),
                                  usecols=self.usecols,
                                  chunksize=chunksize,
                                  dtype=self.vcf_dtypes)
def get_header_df(self, header_txt):
"""Parses header into pandas DataFrame"""
try:
key_value_header = [i.replace('##', '').replace(
'\n', '').split('=', 1) for i in header_txt if '##' in i]
key_value_header.append(
['SampleIDs', header_txt[-1].rstrip('\n').split('\t')[9:]])
key_value_header.append(
['ColumnHeader', header_txt[-1].rstrip('\n').split('\t')])
header_df = pd.DataFrame.from_records(key_value_header)
header_df.set_index(0, inplace=True)
header_df.index.name = 'header_keys'
header_df.columns = ['header_values']
return header_df
except IndexError:
print("VCF header parsing failed, "
"this may be due to the use of "
"tabix version 1.2.x, please upgrade to tabix 1.3 or greater")
return
def get_sample_ids(self, sample_id):
"""
Identifies and stores sample_id(s)
"""
if sample_id == 'all':
return self.samples[:]
else:
if type(sample_id) == str:
return [sample_id]
else:
return sample_id
def set_cols(self, cols):
# Columns
self.all_columns = list(self.header_df.ix['ColumnHeader'])[0]
self.FORMAT = self.all_columns[8]
assert len(set(cols) & set(['#CHROM', 'POS', 'REF', 'ALT', 'FORMAT'])) > 4, "cols requires the following columns: ['#CHROM', 'POS', 'REF', 'ALT', 'FORMAT']"
self.cols = cols
if len(cols) > 0: # columns specified
self.usecols = [c for c in self.all_columns if c in cols]
if len(self.sample_id) > 0:
self.usecols.extend(self.sample_id)
# print self.usecols
else:
assert False, 'no sample IDs'
else: # columns not specified
self.usecols = [s for s in self.cols if s not in self.samples]
self.usecols.extend(self.sample_id)
def set_dtypes(self):
self.vcf_dtypes = {'CHROM':'category',
'POS':'int32',
'REF':'category',
'ALT':'category',
'FORMAT':'category',
'QUAL':'int8',
'FILTER':'category'}
def get_vcf_df_chunk(self):
"""
This function iterates through the VCF files using the user-defined
chunksize (default = 5000 lines).
"""
try:
self.df = self.vcf_chunks.get_chunk()
self.stopIteration = False
except StopIteration:
self.stopIteration = True
print("End of File Reached")
# self.df = None
return 1
self.df.drop_duplicates(inplace=True) # dropping duplicate rows
self.df.columns = [c.replace('#', '') for c in self.usecols]
self.df['CHROM'] = self.df['CHROM'].astype(str).str.replace('chr', '').astype('category')
self.df.set_index(
['CHROM', 'POS', 'REF', 'ALT'], inplace=True, drop=False)
self.df_bytes = self.df.values.nbytes + \
self.df.index.nbytes + self.df.columns.nbytes
return 0
def add_variant_annotations(self, split_columns='', verbose=False,
                            inplace=False, drop_hom_ref=True,
                            n_cores=1):
    """
    This function adds the following annotations for each variant:
    multiallele, phase, a1, a2, GT1, GT2, vartype1, vartype2, zygosity,
    and parsed FORMAT values, see below for additional information.

    Parameters
    --------------
    split_columns: dict, optional
        key:FORMAT id value:#fields expected
        e.g. {'AD':2} indicates Allelic Depth should be
        split into 2 columns.

    drop_hom_ref: bool, default=True
        This will drop homozygous reference genotype calls from
        the long dataframe. As most calls in a multisample vcf
        are homozygous reference, this will reduce memory requirements
        dramatically.

    verbose: bool, default=False
        This will describe how many missing variants were dropped

    inplace: bool, default=False
        This will replace the sample_id column with parsed columns,
        and drop the FORMAT field. If True, this will create an
        additional dataframe, df_annot, to the VCF object composed of
        the parsed columns (memory intensive)

    n_cores: int, default=1
        specifies the number of cpus to use during variantAnnotation
        Note using a large chunksize with large n_cores requires LOTS OF RAM

    Output
    --------------
    This function adds the following annotations to each variant:

    multiallele: {0,1} 0=biallele  1=multiallelic

    phase: {'/', '|'} /=unphased, |=phased

    a1: DNA base representation of allele1 call, e.g. A
    a2: DNA base representation of allele2 call, e.g. A

    GT1: numeric representation of allele1 call, e.g. 0
    GT2: numeric representation of allele2 call, e.g. 1

    vartype1: {snp, mnp, ins, del, indel or SV} variant type of first allele
    vartype2: {snp, mnp, ins, del, indel or SV} variant type of second allele

    zygosity: {het-ref, hom-ref, alt-ref, het-miss, hom-miss}

    FORMAT values: any values associated with the genotype calls are
                   added as additional columns, split_columns are further
                   split by ',' into individual columns

    Returns 1 (no-op) if the chunked reader was already exhausted, else 0.
    """
    # Nothing to annotate once the reader hit StopIteration.
    if self.stopIteration:
        print('End of File Reached')
        return 1
    self.drop_hom_ref = drop_hom_ref
    # Non-core columns (e.g. QUAL/FILTER/INFO) split off from the frame.
    # NOTE(review): df_vcf_cols is computed but never used below —
    # presumably intended for re-joining after annotation (see the
    # "joining QUAL, FILTER" comment); confirm before relying on it.
    df_vcf_cols = self.df[list(set(self.df.columns)
                               - {'CHROM', 'POS', 'REF', 'ALT', 'FORMAT'}
                               - set(self.sample_id))]
    self.df = self.df.reset_index(drop=True)
    # Four-way dispatch: serial vs multiprocess, in-place vs separate
    # df_annot frame. The annotation helpers are defined elsewhere in
    # this file.
    if n_cores==1:
        if inplace:
            self.df = process_variant_annotations(self.df,
                                                  split_columns=split_columns,
                                                  sample_id=self.sample_id,
                                                  drop_hom_ref=drop_hom_ref)
            # joining QUAL, FILTER, and/or INFO columns
        else:
            self.df_annot = process_variant_annotations(self.df,
                                                        split_columns=split_columns,
                                                        sample_id=self.sample_id,
                                                        drop_hom_ref=drop_hom_ref)
    else:
        if inplace:
            self.df = mp_variant_annotations(self.df,
                                             n_cores=n_cores,
                                             df_split_cols=split_columns,
                                             df_sampleid=self.sample_id,
                                             drop_hom_ref=drop_hom_ref)
        else:
            self.df_annot = mp_variant_annotations(self.df,
                                                   n_cores=n_cores,
                                                   df_split_cols=split_columns,
                                                   df_sampleid=self.sample_id,
                                                   drop_hom_ref=drop_hom_ref)
    # Restore the (CHROM, POS, REF, ALT) index on whichever frame was built.
    if inplace:
        self.df = self.df.set_index(['CHROM', 'POS', 'REF', 'ALT'])
    else:
        self.df_annot = self.df_annot.set_index(['CHROM', 'POS', 'REF', 'ALT'])
    return 0
| {
"content_hash": "85c953a2927d91a1cc642d27792b7043",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 164,
"avg_line_length": 36.53716216216216,
"alnum_prop": 0.5150254276467868,
"repo_name": "erscott/pandasVCF",
"id": "5a96291cfc2545deca82d0fc1c09397b5b3ed264",
"size": "10815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandasvcf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "91844"
},
{
"name": "Python",
"bytes": "40107"
}
],
"symlink_target": ""
} |
"""wrapper for xcodebuild command line tool (OSX only)"""
import subprocess
# Tool-registration metadata consumed by the fips tool framework.
name = 'xcodebuild'       # tool identifier
platforms = ['osx']       # xcodebuild only exists on macOS
optional = False          # required on its supported platforms
not_found = 'please install Xcode and Xcode cmd line tools'  # hint shown when missing
#------------------------------------------------------------------------------
def check_exists(fips_dir) :
    """Probe for xcodebuild by invoking 'xcodebuild -version'.
    :returns: True if the binary ran successfully, False otherwise
    """
    try :
        subprocess.check_output(['xcodebuild', '-version'])
    except (OSError, subprocess.CalledProcessError) :
        # Binary missing (OSError) or exited non-zero.
        return False
    return True
#------------------------------------------------------------------------------
def run_build(fips_dir, target, build_type, build_dir, num_jobs=1) :
    """Build one target with xcodebuild.
    :param target: name of build target, or None for ALL_BUILD
    :param build_type: CMAKE_BUILD_TYPE string (e.g. Release, Debug)
    :param build_dir: directory where the xcode project file is located
    :param num_jobs: number of parallel jobs (default: 1)
    :returns: True if xcodebuild exited with status 0
    """
    build_target = target if target else "ALL_BUILD"
    cmdLine = 'xcodebuild -jobs {} -configuration {} -target {}'.format(
        num_jobs, build_type, build_target)
    print(cmdLine)
    return subprocess.call(cmdLine, cwd=build_dir, shell=True) == 0
#------------------------------------------------------------------------------
def run_clean(fips_dir, build_dir) :
    """Invoke xcodebuild's 'clean' action.
    :params build_dir: directory where the xcode project file is located
    :returns: True if the clean succeeded
    """
    try :
        exit_code = subprocess.call('xcodebuild clean', cwd=build_dir, shell=True)
    except (OSError, subprocess.CalledProcessError) :
        return False
    return exit_code == 0
| {
"content_hash": "bb7ee1a68f249bd19926b97faacf4b08",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 101,
"avg_line_length": 35.6078431372549,
"alnum_prop": 0.5644273127753304,
"repo_name": "michaKFromParis/fips",
"id": "cc12cd7d0ece6f3e9f208e25e79a64f523b67afe",
"size": "1816",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mod/tools/xcodebuild.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "34"
},
{
"name": "CMake",
"bytes": "105971"
},
{
"name": "CSS",
"bytes": "7800"
},
{
"name": "HTML",
"bytes": "1828"
},
{
"name": "Python",
"bytes": "375271"
},
{
"name": "VimL",
"bytes": "211"
}
],
"symlink_target": ""
} |
'''
:Author:
Helder Vieira da Silva <contato@helder.eti.br>
:Created:
2014-04-01
:License:
BSD, see LICENSE for more details.
'''
import os

from flask import Flask
# flask.ext.* namespace imports were removed in Flask 1.0; the extension
# is imported by its real package name.
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config.from_object(__name__)

# SQLite database file lives next to this module.
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///%s' % os.path.join(basedir, 'app.db')
# SECURITY NOTE(review): the secret key is hard-coded and committed to the
# repository; it should be loaded from the environment instead.
app.config['SECRET_KEY'] = '6945712387f7a9ab5b73ddfc12a3c7fd1bc8144165d7fe488f8733093676ssse'

db = SQLAlchemy(app)

# Imported at the bottom (after app/db exist) to avoid circular imports.
from models import User, PhoneNumber
from views import index
| {
"content_hash": "6b0c08f7bb2fda2aedcf6d03f24e95b7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 93,
"avg_line_length": 23.14814814814815,
"alnum_prop": 0.6992,
"repo_name": "HelderVieira/flask_master_detail",
"id": "09d54bfcdf7abff15bbd5726c10a47157a2c4c69",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "980"
},
{
"name": "Python",
"bytes": "4301"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create InvestmentBucketDescription and link buckets to it (M2M)."""

    dependencies = [
        ('stocks', '0004_auto_20171011_1324'),
    ]

    operations = [
        # New model: one free-text description/attribute line per row.
        migrations.CreateModel(
            name='InvestmentBucketDescription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=255)),
            ],
        ),
        # Many-to-many from InvestmentBucket to its description lines;
        # reverse accessor on the description side is 'bucket'.
        migrations.AddField(
            model_name='investmentbucket',
            name='invest_attributes',
            field=models.ManyToManyField(related_name='bucket', to='stocks.InvestmentBucketDescription'),
        ),
    ]
| {
"content_hash": "d00b4c26de609c117718ac63e0be8253",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 114,
"avg_line_length": 30.12,
"alnum_prop": 0.5962815405046481,
"repo_name": "Neitsch/ASE4156",
"id": "194d8230e9718181ed812ad732886c80a59ce29f",
"size": "826",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stocks/migrations/0005_auto_20171011_1408.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "52"
},
{
"name": "HTML",
"bytes": "6962"
},
{
"name": "JavaScript",
"bytes": "148908"
},
{
"name": "PHP",
"bytes": "219"
},
{
"name": "Python",
"bytes": "175771"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
import struct
import threading
import subprocess
import SocketServer # TODO(leszeks): python 3 compatibility
def CreateFileHandlerClass(root_dirs, verbose):
    """Build a request-handler class serving whitelisted files over TCP.

    The returned class closes over root_dirs (directories files may be
    served from) and verbose. Protocol: the client sends a NUL-terminated
    file path; the server replies with a 4-byte big-endian length followed
    by the file contents, or length -1 on any failure.
    """
    class FileHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            # Accumulate until the NUL terminator arrives.
            # NOTE(review): indexing/comparison here relies on Python 2
            # byte-string semantics (data[-1] is a 1-char str); this module
            # imports SocketServer and is Python-2 only as written.
            data = self.request.recv(1024);
            while data[-1] != "\0":
                data += self.request.recv(1024);
            filename = data[0:-1]
            try:
                # Only serve files under one of the allowed roots.
                filename = os.path.abspath(filename)
                if not any(filename.startswith(root) for root in root_dirs):
                    raise Exception("{} not in roots {}".format(filename, root_dirs))
                if not os.path.isfile(filename):
                    raise Exception("{} is not a file".format(filename))
                if verbose:
                    sys.stdout.write("Serving {}\r\n".format(os.path.relpath(filename)))
                with open(filename) as f:
                    contents = f.read();
                # Length-prefixed reply: 4-byte big-endian length, then data.
                self.request.sendall(struct.pack("!i", len(contents)))
                self.request.sendall(contents)
            except Exception as e:
                if verbose:
                    sys.stderr.write(
                        "Request failed ({})\n".format(e).replace('\n','\r\n'))
                # Length -1 tells the device-side reader the request failed.
                self.request.sendall(struct.pack("!i", -1))
    return FileHandler
def TransferD8ToDevice(adb, build_dir, device_d8_dir, verbose):
    """Copy d8 and its snapshot blobs to the device, skipping files whose
    on-device md5 already matches the local build output.

    :param adb: path to the adb binary
    :param build_dir: local directory containing the android d8 build
    :param device_d8_dir: destination directory on the device
    :param verbose: print progress for each pushed file
    """
    files_to_copy = ["d8", "natives_blob.bin", "snapshot_blob.bin"]

    # Pipe the output of md5sum from the local computer to the device, checking
    # the md5 hashes on the device.
    local_md5_sum_proc = subprocess.Popen(
        ["md5sum"] + files_to_copy,
        cwd=build_dir,
        stdout=subprocess.PIPE
    )
    # 'md5sum -c -' on the device reads the expected hashes from stdin and
    # prints "<file>: OK" / "<file>: FAILED" per line.
    device_md5_check_proc = subprocess.Popen(
        [
            adb, "shell",
            "mkdir -p '{0}' ; cd '{0}' ; md5sum -c -".format(device_d8_dir)
        ],
        stdin=local_md5_sum_proc.stdout,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )

    # Push any files which failed the md5 check.
    # NOTE(review): stdoutdata is treated as str — Python 2 semantics; under
    # Python 3 Popen without text=True yields bytes.
    (stdoutdata, stderrdata) = device_md5_check_proc.communicate()
    for line in stdoutdata.split('\n'):
        if line.endswith(": FAILED"):
            filename = line[:-len(": FAILED")]
            if verbose:
                print("Updating {}...".format(filename))
            subprocess.check_call([
                adb, "push",
                os.path.join(build_dir, filename),
                device_d8_dir
            ], stdout=sys.stdout if verbose else open(os.devnull, 'wb'))
def AdbForwardDeviceToLocal(adb, device_port, server_port, verbose):
    """Reverse-forward a device TCP port to the local file-server port."""
    if verbose:
        print("Forwarding device:{} to localhost:{}...".format(
            device_port, server_port))
    reverse_cmd = [
        adb, "reverse",
        "tcp:{}".format(device_port),
        "tcp:{}".format(server_port),
    ]
    subprocess.check_call(reverse_cmd)
def AdbRunD8(adb, device_d8_dir, device_port, d8_args, verbose):
    """Run d8 on the device via adb shell; return its exit status."""
    # Single-quote the passthrough arguments and prepend the flag telling
    # d8 to fetch files over the forwarded TCP port.
    passthrough = " ".join("'{}'".format(a) for a in d8_args)
    full_args = "--read-from-tcp-port='{}' ".format(device_port) + passthrough
    # String concatenation, not os.path.join: the path uses the device's
    # path conventions, not the host's.
    d8_command = "{}/d8 {}".format(device_d8_dir, full_args)
    if sys.stdout.isatty():
        # Allocate a tty (-t) so running d8 without a script is interactive.
        cmd = [adb, "shell", "-t", d8_command]
    else:
        cmd = [adb, "shell", d8_command]
    if verbose:
        print("Running {}".format(" ".join(cmd)))
    return subprocess.call(cmd)
def PrintUsage(file=sys.stdout):
    """Write the one-line usage summary to *file* (stdout by default)."""
    usage_line = "Usage: adb-d8.py [-v|--verbose] [--] <build_dir> [<d8 args>...]"
    print(usage_line, file=file)
def PrintHelp(file=sys.stdout):
    """Print the full docopt-style help text to *file* (stdout by default)."""
    print("""Usage:
  adb-d8.py [options] [--] <build_dir> [<d8_args>...]
  adb-d8.py -h|--help

Options:
  -h|--help               Show this help message and exit.
  -v|--verbose            Print verbose output.
  --device-dir=DIR        Specify which directory on the device should be used
                          for the d8 binary. [default: /data/local/tmp/v8]
  --extra-root-dir=DIR    In addition to the current directory, allow d8 to
                          access files inside DIR. Multiple additional roots
                          can be specified.

  <build_dir>             The directory containing the android build of d8.
  <d8_args>...            The arguments passed through to d8.""", file=file)
def Main():
    """Entry point: parse options, sync d8 to the device, serve local files
    over a reverse-forwarded port, and run d8 on the device.

    Returns a process exit status (d8's status, 0 for --help, 1 on usage
    errors).
    """
    if len(sys.argv) < 2:
        PrintUsage(sys.stderr)
        return 1

    script_dir = os.path.dirname(sys.argv[0])

    # Use the platform-tools version of adb so that we know it has the reverse
    # command.
    adb = os.path.join(
        script_dir,
        "../third_party/android_tools/sdk/platform-tools/adb"
    )

    # Read off any command line flags before build_dir (or --). Do this
    # manually, rather than using something like argparse, to be able to split
    # the adb-d8 options from the passthrough d8 options.
    verbose = False
    device_d8_dir = '/data/local/tmp/v8'
    root_dirs = []
    arg_index = 1
    while arg_index < len(sys.argv):
        arg = sys.argv[arg_index]
        if not arg.startswith("-"):
            # First non-flag argument is build_dir; stop option parsing.
            break
        elif arg == "--":
            arg_index += 1
            break
        elif arg == "-h" or arg == "--help":
            PrintHelp(sys.stdout)
            return 0
        elif arg == "-v" or arg == "--verbose":
            verbose = True
        elif arg == "--device-dir":
            # Space-separated form: value is the next argv entry.
            arg_index += 1
            device_d8_dir = sys.argv[arg_index]
        elif arg.startswith("--device-dir="):
            device_d8_dir = arg[len("--device-dir="):]
        elif arg == "--extra-root-dir":
            arg_index += 1
            root_dirs.append(sys.argv[arg_index])
        elif arg.startswith("--extra-root-dir="):
            root_dirs.append(arg[len("--extra-root-dir="):])
        else:
            print("ERROR: Unrecognised option: {}".format(arg))
            PrintUsage(sys.stderr)
            return 1
        arg_index += 1

    # Transfer d8 (and dependencies) to the device.
    build_dir = os.path.abspath(sys.argv[arg_index])
    TransferD8ToDevice(adb, build_dir, device_d8_dir, verbose)

    # Start a file server for the files d8 might need.
    script_root_dir = os.path.abspath(os.curdir)
    root_dirs.append(script_root_dir)
    server = SocketServer.TCPServer(
        ("localhost", 0), # 0 means an arbitrary unused port.
        CreateFileHandlerClass(root_dirs, verbose)
    )

    try:
        # Start the file server in its own thread.
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        # Port-forward the given device port to the file server.
        # TODO(leszeks): Pick an unused device port.
        # TODO(leszeks): Remove the port forwarding on exit.
        server_ip, server_port = server.server_address
        device_port = 4444
        AdbForwardDeviceToLocal(adb, device_port, server_port, verbose)

        # Run d8 over adb with the remaining arguments, using the given device
        # port to forward file reads.
        return AdbRunD8(
            adb, device_d8_dir, device_port, sys.argv[arg_index+1:], verbose)
    finally:
        # Always stop the file server, even if d8 or adb failed.
        if verbose:
            print("Shutting down file server...")
        server.shutdown()
        server.server_close()

if __name__ == '__main__':
    sys.exit(Main())
| {
"content_hash": "faeadd6f8959e2650b78223396f72735",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 78,
"avg_line_length": 31.207207207207208,
"alnum_prop": 0.6283198614318707,
"repo_name": "fceller/arangodb",
"id": "416714692c59b205820ad64de618ede87bebf55f",
"size": "7594",
"binary": false,
"copies": "4",
"ref": "refs/heads/devel",
"path": "3rdParty/V8/v7.1.302.28/tools/adb-d8.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "AppleScript",
"bytes": "1429"
},
{
"name": "Assembly",
"bytes": "142084"
},
{
"name": "Batchfile",
"bytes": "9073"
},
{
"name": "C",
"bytes": "1938354"
},
{
"name": "C#",
"bytes": "55625"
},
{
"name": "C++",
"bytes": "79379178"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "109718"
},
{
"name": "CSS",
"bytes": "1341035"
},
{
"name": "CoffeeScript",
"bytes": "94"
},
{
"name": "DIGITAL Command Language",
"bytes": "27303"
},
{
"name": "Emacs Lisp",
"bytes": "15477"
},
{
"name": "Go",
"bytes": "1018005"
},
{
"name": "Groff",
"bytes": "263567"
},
{
"name": "HTML",
"bytes": "459886"
},
{
"name": "JavaScript",
"bytes": "55446690"
},
{
"name": "LLVM",
"bytes": "39361"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "178253"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "26909"
},
{
"name": "Objective-C",
"bytes": "4430"
},
{
"name": "Objective-C++",
"bytes": "1857"
},
{
"name": "Pascal",
"bytes": "145262"
},
{
"name": "Perl",
"bytes": "227308"
},
{
"name": "Protocol Buffer",
"bytes": "5837"
},
{
"name": "Python",
"bytes": "3563935"
},
{
"name": "Ruby",
"bytes": "1000962"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "19885"
},
{
"name": "Shell",
"bytes": "488846"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "Yacc",
"bytes": "36950"
}
],
"symlink_target": ""
} |
"""Populate the lighthouse LAPACK database tables by running each
sub-directory's load_data.py script from the database root."""
import os

# Caller's working directory. NOTE(review): never restored or read again;
# kept only for backwards compatibility.
currentDir = os.getcwd()

##### ------------ go to ../lighthouse/database/ ---------------- #####
# Database root is two levels above this script.
database_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
# Parenthesized print works on both Python 2 and Python 3
# (the original bare `print` statement was Python-2-only syntax).
print(database_path)

##### ------------ for LAPACK routine info table ---------------- #####
### load data to table
### lapack_routineinfo
os.chdir(database_path + "/RoutineInfo/lapack")
os.system('python load_data.py')
os.chdir(database_path)

# The sections below are deliberately disabled (kept inside a string
# literal); left verbatim so they can be re-enabled by removing the quotes.
'''##### ------------ for LAPACK_le tables ---------------- #####
### load data to table
### lapack_le_linearequation_simple, lapack_le_linearequation_expert, lapack_le_linearequation_driver
os.chdir(database_path+"/lapack_le/Driver")
os.system('python load_data.py')
os.chdir(database_path)
### load data to table
### lapack_le_linearequation_computational, lapack_le_linearequation_factor
### lapack_le_linearequation_solve, lapack_le_linearequation_condition_number
### lapack_le_linearequation_error_bound, lapack_le_linearequation_invert
### lapack_le_linearequation_equilibrate
os.chdir(database_path+"/lapack_le/Computational")
os.system('python load_data.py')
os.chdir(database_path)
### load data to table
### lapack_le_only
os.chdir(database_path+"/lapack_le/Combine")
os.system('python load_data.py')
os.chdir(database_path)
### load data to table
### lapack_le_arg
os.chdir(database_path+"/lapack_le/Arguments")
os.system('python load_data.py')
os.chdir(database_path)
##### ------------ for LAPACK_eigen tables ---------------- #####
### load data to table
### lapack_eigen
os.chdir(database_path+"/lapack_eigen/guided")
os.system('python load_data.py')
os.chdir(database_path)
##### ------------ for LAPACK_svd tables ---------------- #####
### load data to table
### lapack_svd
os.chdir(database_path+"/lapack_svd/guided")
os.system('python load_data.py')
os.chdir(database_path)'''
| {
"content_hash": "b1826d04c952b2abd134e20f888d21c5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 101,
"avg_line_length": 27.191176470588236,
"alnum_prop": 0.6576527852893456,
"repo_name": "LighthouseHPC/lighthouse",
"id": "59c637316ed1986cd7f675a0fa11dab73d1f1961",
"size": "1890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/lily/django_orthg/orthg/database/dataLoad/lapack/dataload.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
__author__ = 'jiajunshen'
import amitgroup as ag
import numpy as np
from pnet.layer import Layer
@Layer.register('normalize-layer')
class NormalizeLayer(Layer):
    """Layer that records the training-set mean of its input.

    NOTE(review): despite the name, extract() currently returns the input
    unchanged (cast to float32) — the mean-subtraction line is commented
    out, and debug prints remain. Confirm whether normalization should be
    re-enabled before relying on this layer.
    """
    def __init__(self):
        # Per-feature mean of the training data; populated by train().
        self._mean = None

    @property
    def trained(self):
        # Trained once a mean has been computed.
        return self._mean is not None

    def train(self, X, Y = None, OriginalX = None):
        # Y and OriginalX are accepted for interface compatibility but unused.
        self._mean = np.mean(X, axis = 0)

    def extract(self, X):
        # Normalization disabled: the mean-subtracted variant is commented
        # out below, so this returns X unchanged as float32.
        #return (X - self._mean).astype(np.float32)
        print("max value .....")
        print(np.max(X))
        return (X).astype(np.float32)

    def save_to_dict(self):
        # Serialize the trained state (just the mean).
        d = {}
        d['mean'] = self._mean
        return d

    @classmethod
    def load_from_dict(cls, d):
        # Rebuild a trained instance from save_to_dict() output.
        obj = cls()
        obj._mean = d['mean']
        return obj
| {
"content_hash": "dd3be74bcac2982528e2d877b39833ae",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 64,
"avg_line_length": 23.25,
"alnum_prop": 0.5746714456391876,
"repo_name": "jiajunshen/partsNet",
"id": "ea666b5f46c32f9e6af9080e366c66e94d529a27",
"size": "838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pnet/normalizeLayer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10668239"
},
{
"name": "Python",
"bytes": "872266"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
} |
from agents.agent import Agent
from agents import MonteCarloAgent
class HumanAgent(Agent):
    """This agent is controlled by a human, who inputs moves via stdin."""

    def __init__(self, reversi, color, **kwargs):
        # reversi: the game object (also needed to build a helper
        # MonteCarloAgent); color: the side this human plays.
        self.reversi = reversi
        self.color = color

    def reset(self):
        # A human player keeps no per-game state to reset.
        pass

    def observe_win(self, winner):
        # No learning signal to record for a human.
        pass

    def get_action(self, game_state, legal):
        """Prompt on stdin until a legal move (or pass/quit) is entered.

        Returns an (x, y) tuple from `legal`, or None when there are no
        legal moves or the user types 'pass'.

        Accepted inputs:
          'x,y'          play that square (single-digit coordinates only)
          'pass'         return None
          'exit'/'quit'  terminate the program
          'helpme [t]'   print a Monte-Carlo suggestion (t seconds, default 5)
        """
        if not legal:
            return None
        choice = None
        while True:
            raw_choice = input('Enter a move x,y: ')
            if raw_choice == 'pass':
                return None
            elif raw_choice == 'exit' or raw_choice == 'quit':
                quit()
            elif raw_choice.startswith('helpme'):
                # Optional second token overrides the simulation time.
                sim_time = 5
                s = raw_choice.split()
                if len(s) == 2 and s[1].isdigit():
                    sim_time = int(s[1])
                self.get_help(game_state, legal, sim_time)
                continue
            elif len(raw_choice) != 3:
                # NOTE(review): only single-digit coordinates parse here;
                # boards 10 wide or more would need a more general parser.
                print('input must be 3 long, formatted x,y')
                continue

            if raw_choice[1] != ',':
                print('comma separator not found.')
                continue
            if not raw_choice[0].isdigit() or not raw_choice[2].isdigit():
                print('couldn\'t determine x,y from your input.')
                continue

            choice = (int(raw_choice[0]), int(raw_choice[2]))
            if choice not in legal:
                print('not a legal move. try again.')
                continue
            else:
                break
        return choice

    def get_help(self, game_state, legal, sim_time):
        # Ask a fresh MonteCarloAgent of our color what it would play.
        mc = MonteCarloAgent(self.reversi, self.color, sim_time=sim_time)
        action = mc.get_action(game_state, legal)
        print('suggested move: {}'.format(action))
| {
"content_hash": "1c608dfa8c287c3105fe0a9b4075a3ea",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 74,
"avg_line_length": 32.771929824561404,
"alnum_prop": 0.5112419700214133,
"repo_name": "andysalerno/reversi-ai",
"id": "98d5afc6c9d4cf153c4d748ff7145f09ebbffc0e",
"size": "1868",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "agents/human_agent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30482"
}
],
"symlink_target": ""
} |
"""The tests for an update of the Twitch component."""
from requests import HTTPError
from twitch.resources import Channel, Follow, Stream, Subscription, User
from homeassistant.components import sensor
from homeassistant.const import CONF_CLIENT_ID
from homeassistant.setup import async_setup_component
from tests.async_mock import MagicMock, patch
# Entity id created by the twitch sensor platform for channel "channel123".
ENTITY_ID = "sensor.channel123"

# Minimal platform config: one monitored channel, no OAuth token.
CONFIG = {
    sensor.DOMAIN: {
        "platform": "twitch",
        CONF_CLIENT_ID: "1234",
        "channels": ["channel123"],
    }
}

# Same config plus an OAuth token, which enables the sub/follow attributes.
CONFIG_WITH_OAUTH = {
    sensor.DOMAIN: {
        "platform": "twitch",
        CONF_CLIENT_ID: "1234",
        "channels": ["channel123"],
        "token": "9876",
    }
}

# Canned twitch API objects returned by the mocked TwitchClient.
USER_ID = User({"id": 123, "display_name": "channel123", "logo": "logo.png"})
STREAM_OBJECT_ONLINE = Stream(
    {
        "channel": {"game": "Good Game", "status": "Title"},
        "preview": {"medium": "stream-medium.png"},
    }
)
CHANNEL_OBJECT = Channel({"followers": 42, "views": 24})
# The authenticated (OAuth) user, distinct from the monitored channel.
OAUTH_USER_ID = User({"id": 987})
SUB_ACTIVE = Subscription({"created_at": "2020-01-20T21:22:42", "is_gift": False})
FOLLOW_ACTIVE = Follow({"created_at": "2020-01-20T21:22:42"})
async def test_init(hass):
    """Test initial config."""
    # Configure the mock client directly, matching the style used by every
    # other test in this module (the intermediate `channels`/`streams`
    # MagicMock objects were redundant — attribute access on a MagicMock
    # auto-creates child mocks).
    twitch_mock = MagicMock()
    twitch_mock.users.translate_usernames_to_ids.return_value = [USER_ID]
    twitch_mock.channels.get_by_id.return_value = CHANNEL_OBJECT
    twitch_mock.streams.get_stream_by_user.return_value = None

    with patch(
        "homeassistant.components.twitch.sensor.TwitchClient", return_value=twitch_mock
    ):
        assert await async_setup_component(hass, sensor.DOMAIN, CONFIG) is True
        await hass.async_block_till_done()

    sensor_state = hass.states.get(ENTITY_ID)
    assert sensor_state.state == "offline"
    assert sensor_state.name == "channel123"
    assert sensor_state.attributes["icon"] == "mdi:twitch"
    assert sensor_state.attributes["friendly_name"] == "channel123"
    assert sensor_state.attributes["views"] == 24
    assert sensor_state.attributes["followers"] == 42
async def test_offline(hass):
    """Channel with no live stream reports the 'offline' state."""
    client = MagicMock()
    client.users.translate_usernames_to_ids.return_value = [USER_ID]
    client.channels.get_by_id.return_value = CHANNEL_OBJECT
    client.streams.get_stream_by_user.return_value = None

    patcher = patch(
        "homeassistant.components.twitch.sensor.TwitchClient",
        return_value=client,
    )
    with patcher:
        assert await async_setup_component(hass, sensor.DOMAIN, CONFIG) is True
        await hass.async_block_till_done()

    state = hass.states.get(ENTITY_ID)
    assert state.state == "offline"
    assert state.attributes["entity_picture"] == "logo.png"
async def test_streaming(hass):
    """Channel with a live stream reports the 'streaming' state."""
    client = MagicMock()
    client.users.translate_usernames_to_ids.return_value = [USER_ID]
    client.channels.get_by_id.return_value = CHANNEL_OBJECT
    client.streams.get_stream_by_user.return_value = STREAM_OBJECT_ONLINE

    patcher = patch(
        "homeassistant.components.twitch.sensor.TwitchClient",
        return_value=client,
    )
    with patcher:
        assert await async_setup_component(hass, sensor.DOMAIN, CONFIG) is True
        await hass.async_block_till_done()

    state = hass.states.get(ENTITY_ID)
    assert state.state == "streaming"
    assert state.attributes["entity_picture"] == "stream-medium.png"
    assert state.attributes["game"] == "Good Game"
    assert state.attributes["title"] == "Title"
async def test_oauth_without_sub_and_follow(hass):
    """With OAuth but failing sub/follow lookups, both flags are False."""
    client = MagicMock()
    client.users.translate_usernames_to_ids.return_value = [USER_ID]
    client.channels.get_by_id.return_value = CHANNEL_OBJECT
    client._oauth_token = True  # A replacement for the token
    client.users.get.return_value = OAUTH_USER_ID
    # API errors are treated as "not subscribed / not following".
    client.users.check_subscribed_to_channel.side_effect = HTTPError()
    client.users.check_follows_channel.side_effect = HTTPError()

    patcher = patch(
        "homeassistant.components.twitch.sensor.TwitchClient",
        return_value=client,
    )
    with patcher:
        assert await async_setup_component(hass, sensor.DOMAIN, CONFIG_WITH_OAUTH)
        await hass.async_block_till_done()

    state = hass.states.get(ENTITY_ID)
    assert state.attributes["subscribed"] is False
    assert state.attributes["following"] is False
async def test_oauth_with_sub(hass):
    """With OAuth and an active subscription, sub attributes are exposed."""
    client = MagicMock()
    client.users.translate_usernames_to_ids.return_value = [USER_ID]
    client.channels.get_by_id.return_value = CHANNEL_OBJECT
    client._oauth_token = True  # A replacement for the token
    client.users.get.return_value = OAUTH_USER_ID
    client.users.check_subscribed_to_channel.return_value = SUB_ACTIVE
    # Follow lookup fails -> treated as not following.
    client.users.check_follows_channel.side_effect = HTTPError()

    patcher = patch(
        "homeassistant.components.twitch.sensor.TwitchClient",
        return_value=client,
    )
    with patcher:
        assert await async_setup_component(hass, sensor.DOMAIN, CONFIG_WITH_OAUTH)
        await hass.async_block_till_done()

    state = hass.states.get(ENTITY_ID)
    assert state.attributes["subscribed"] is True
    assert state.attributes["subscribed_since"] == "2020-01-20T21:22:42"
    assert state.attributes["subscription_is_gifted"] is False
    assert state.attributes["following"] is False
async def test_oauth_with_follow(hass):
    """With OAuth and an active follow, follow attributes are exposed."""
    client = MagicMock()
    client.users.translate_usernames_to_ids.return_value = [USER_ID]
    client.channels.get_by_id.return_value = CHANNEL_OBJECT
    client._oauth_token = True  # A replacement for the token
    client.users.get.return_value = OAUTH_USER_ID
    # Subscription lookup fails -> treated as not subscribed.
    client.users.check_subscribed_to_channel.side_effect = HTTPError()
    client.users.check_follows_channel.return_value = FOLLOW_ACTIVE

    patcher = patch(
        "homeassistant.components.twitch.sensor.TwitchClient",
        return_value=client,
    )
    with patcher:
        assert await async_setup_component(hass, sensor.DOMAIN, CONFIG_WITH_OAUTH)
        await hass.async_block_till_done()

    state = hass.states.get(ENTITY_ID)
    assert state.attributes["subscribed"] is False
    assert state.attributes["following"] is True
    assert state.attributes["following_since"] == "2020-01-20T21:22:42"
| {
"content_hash": "ac36fc6d9baf2d23d09918be7ec5b537",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 87,
"avg_line_length": 37.35,
"alnum_prop": 0.6915067678119887,
"repo_name": "sdague/home-assistant",
"id": "33afde2a076380c829596c8a5ab3fc645e964170",
"size": "6723",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "tests/components/twitch/test_twitch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "27869189"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
"""
Special models useful for complex compound models where control is needed over
which outputs from a source model are mapped to which inputs of a target model.
"""
from .core import FittableModel
__all__ = ['Mapping', 'Identity']
class Mapping(FittableModel):
    """
    Allows inputs to be reordered, duplicated or dropped.

    Parameters
    ----------
    mapping : tuple
        A tuple of integers representing indices of the inputs to this model
        to return and in what order to return them.  See
        :ref:`compound-model-mappings` for more details.
    n_inputs : int
        Number of inputs; if `None` (default) then ``max(mapping) + 1`` is
        used (i.e. the highest input index used in the mapping).
    name : str, optional
        A human-friendly name associated with this model instance
        (particularly useful for identifying the individual components of a
        compound model).
    meta : dict-like
        Free-form metadata to associate with this model.

    Raises
    ------
    TypeError
        Raised when number of inputs is less that ``max(mapping)``.

    Examples
    --------
    >>> from astropy.modeling.models import Polynomial2D, Shift, Mapping
    >>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
    >>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1)
    >>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2)
    >>> model(1, 2)  # doctest: +FLOAT_CMP
    (17.0, 14.2)
    """

    linear = True  # FittableModel is non-linear by default

    def __init__(self, mapping, n_inputs=None, name=None, meta=None):
        # Infer the input count from the highest index used, unless given.
        count = max(mapping) + 1 if n_inputs is None else n_inputs
        self._inputs = tuple('x' + str(idx) for idx in range(count))
        self._outputs = tuple('x' + str(idx) for idx in range(len(mapping)))
        self._mapping = mapping
        super().__init__(name=name, meta=meta)

    @property
    def inputs(self):
        """
        The name(s) of the input variable(s) on which a model is evaluated.
        """
        return self._inputs

    @property
    def outputs(self):
        """The name(s) of the output(s) of the model."""
        return self._outputs

    @property
    def mapping(self):
        """Integers representing indices of the inputs."""
        return self._mapping

    def __repr__(self):
        if self.name is None:
            return '<Mapping({0})>'.format(self.mapping)
        return '<Mapping({0}, name={1})>'.format(self.mapping, self.name)

    def evaluate(self, *args):
        if len(args) != self.n_inputs:
            name = "Mapping" if self.name is None else self.name
            raise TypeError('{0} expects {1} inputs; got {2}'.format(
                name, self.n_inputs, len(args)))

        mapped = tuple(args[idx] for idx in self._mapping)
        # A single output is returned bare rather than as a 1-tuple.
        if self.n_outputs == 1:
            return mapped[0]
        return mapped

    @property
    def inverse(self):
        """
        A `Mapping` representing the inverse of the current mapping.

        Raises
        ------
        `NotImplementedError`
            An inverse does no exist on mappings that drop some of its inputs
            (there is then no way to reconstruct the inputs that were
            dropped).
        """
        # Invert by locating, for each input index, its position in the
        # forward mapping; a missing index means that input was dropped.
        try:
            inverted = tuple(self.mapping.index(idx)
                             for idx in range(self.n_inputs))
        except ValueError:
            raise NotImplementedError(
                "Mappings such as {0} that drop one or more of their inputs "
                "are not invertible at this time.".format(self.mapping))

        inv = self.__class__(inverted)
        inv._inputs = self._outputs
        inv._outputs = self._inputs
        return inv
class Identity(Mapping):
    """
    Returns inputs unchanged.

    This class is useful in compound models when some of the inputs must be
    passed unchanged to the next model.

    Parameters
    ----------
    n_inputs : int
        Specifies the number of inputs this identity model accepts.
    name : str, optional
        A human-friendly name associated with this model instance
        (particularly useful for identifying the individual components of a
        compound model).
    meta : dict-like
        Free-form metadata to associate with this model.

    Examples
    --------
    Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs::

        >>> from astropy.modeling.models import (Polynomial1D, Shift, Scale,
        ...                                      Identity)
        >>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2)
        >>> model(1,1)  # doctest: +FLOAT_CMP
        (2.4, 2.0)
        >>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP
        (1.0, 1.0)
    """
    linear = True  # FittableModel is non-linear by default

    def __init__(self, n_inputs, name=None, meta=None):
        # An identity is simply every input mapped onto itself.
        super().__init__(tuple(range(n_inputs)), name=name, meta=meta)

    def __repr__(self):
        if self.name is not None:
            return '<Identity({0}, name={1})>'.format(self.n_inputs, self.name)
        return '<Identity({0})>'.format(self.n_inputs)

    @property
    def inverse(self):
        """
        The inverse transformation.

        In this case of `Identity`, ``self.inverse is self``.
        """
        return self
| {
"content_hash": "173d02e96fc5fda6d69b839c24c613a0",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 79,
"avg_line_length": 31.16949152542373,
"alnum_prop": 0.573500090628965,
"repo_name": "funbaker/astropy",
"id": "314bd007e09895324c91f58ef05cd30726e97b1b",
"size": "5517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astropy/modeling/mappings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "367279"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Python",
"bytes": "8331581"
},
{
"name": "TeX",
"bytes": "805"
}
],
"symlink_target": ""
} |
"""
Utility functions for dealing with the Design/Service registry.
TODO: M21 replace with OCCI calls!
"""
import os
from keystoneclient.v2_0 import client
def list_services(token, endpoint='http://localhost:35357/v2.0'):
    """
    List services registered within this CC.

    :param token: The token.
    :param endpoint: Optional design module uri.
    """
    # An OpS-provided DESIGN_URI always takes precedence over the default.
    endpoint = os.environ.get('DESIGN_URI', endpoint)
    design = client.Client(token=token, endpoint=endpoint)
    return {item.name: {'description': item.description}
            for item in design.services.list()}
def _get_url_type(item, **kwargs):
    """
    Get the correct type of endpoint. OpenStack has both a public and internal
    URL for services.

    By default we select internalurl, otherwise if url_type is set to public
    in kwargs then select that URL.

    :param item: the openstack service whose API is requested.
    """
    selected = kwargs.get('url_type', 'internal')
    if selected == 'internal':
        return item.internalurl
    if selected == 'public':
        return item.publicurl
    raise AttributeError('Unrecognised URL type: ' + selected +
                         '. Supported types: public, internal')
def get_service_endpoints(stype, token,
                          endpoint='http://localhost:35357/v2.0', **kwargs):
    """
    Retrieve all endpoints for a given service type.

    :param stype: service type.
    :param token: The token.
    :param endpoint: Optional design module uri.
    :param kwargs: Optional arguments; ``tenant_name`` is required.
    :return: The endpoints for the given service type.
    :raises ValueError: if ``tenant_name`` is not supplied.
    """
    # Update from OpS variable.
    if 'DESIGN_URI' in os.environ:
        endpoint = os.environ['DESIGN_URI']
    if 'tenant_name' not in kwargs:
        # ValueError is more precise than the bare Exception raised before;
        # being an Exception subclass, existing handlers still match.
        raise ValueError('Tenant Name missing from request')
    tname = kwargs['tenant_name']
    design = client.Client(token=token, endpoint=endpoint)
    raw_token = design.get_raw_token_from_identity_service(
        endpoint, token=token, tenant_name=tname)
    sc = raw_token.service_catalog
    return sc.get_endpoints(service_type=stype)
def get_service_endpoint(identifier, token,
                         endpoint='http://localhost:35357/v2.0', **kwargs):
    """
    Retrieve an endpoint for a particular service addressable by this CC.

    Returns None if no endpoint could be found.

    :param identifier: Identifier (service type) for the service.
    :param token: The token.
    :param endpoint: Optional design module uri.
    :param kwargs: Optional arguments - tenant_name, region, url_type,
        allow_multiple.
    """
    # Update from OpS variable.
    if 'DESIGN_URI' in os.environ:
        endpoint = os.environ['DESIGN_URI']
    design = client.Client(token=token, endpoint=endpoint)
    tname = kwargs.get('tenant_name', 'demo')
    region = kwargs.get('region', 'RegionOne')
    allow_multiple = bool(kwargs.get('allow_multiple'))

    # find tenant id
    tenant_id = None
    for item in design.tenants.list():
        if item.name == tname:
            tenant_id = item.id
    if tenant_id is None:
        return None

    # find the service description(s) matching the requested type.
    service_ids = []
    for item in design.services.list():
        if item.type == identifier:
            service_ids.append(design.services.get(item.id).id)
    if len(service_ids) == 0:
        return None

    def _fill_tenant(url):
        # Endpoint templates may embed the tenant id with either a '%' or a
        # '$' style placeholder - substitute whichever one is present.
        if '%(tenant_id)s' in url:
            return url.replace('%(tenant_id)s', tenant_id)
        if '$(tenant_id)s' in url:
            return url.replace('$(tenant_id)s', tenant_id)
        return url

    res = [] if allow_multiple else None
    for item in design.endpoints.list():
        for service_id in service_ids:
            if service_id == item.service_id and region == item.region:
                if allow_multiple:
                    res.append(_get_url_type(item, **kwargs))
                else:
                    res = _get_url_type(item, **kwargs)
    # BUG FIX: the previous code ran the placeholder substitution
    # unconditionally, raising TypeError ("in None") when no endpoint
    # matched, and performing a useless membership test on lists. Only
    # substitute on actual URL strings (element-wise for lists).
    if isinstance(res, str):
        res = _fill_tenant(res)
    elif isinstance(res, list):
        res = [_fill_tenant(url) for url in res]
    return res
| {
"content_hash": "833bac8927d14be45981bce5217012c3",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 100,
"avg_line_length": 30.04964539007092,
"alnum_prop": 0.612697663441114,
"repo_name": "icclab/hurtle_cc_sdk",
"id": "0b57c0e198928126110289ba0ff7a6a74d916d68",
"size": "4892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "Python",
"bytes": "125583"
},
{
"name": "Shell",
"bytes": "113"
}
],
"symlink_target": ""
} |
"""
Setup script for LO-PHI Network Services
(c) 2015 Massachusetts Institute of Technology
"""
import os
from distutils.core import setup
DIR_ROOT = "/opt/lophi"
def get_data_files(rel_dir):
    """Collect ``(install_path, files)`` pairs for every directory in rel_dir.

    Install paths are rooted under ``DIR_ROOT``; compiled ``.pyc`` files are
    excluded from the payload.
    """
    install_list = []
    for directory, _subdirs, filenames in os.walk(rel_dir):
        to_install = [os.path.join(directory, name)
                      for name in filenames
                      if not name.endswith(".pyc")]
        if to_install:
            install_list.append((os.path.join(DIR_ROOT, directory), to_install))
    return install_list
def get_packages(rel_dir):
    """Return ``rel_dir`` plus every sub-package beneath it, dot-separated."""
    packages = [rel_dir]
    for dirpath, subdirs, _files in os.walk(rel_dir):
        parts = list(os.path.split(dirpath))
        # Drop the empty head produced when dirpath has no parent component.
        if parts[0] == "":
            del parts[0]
        packages.extend(".".join(parts + [name]) for name in subdirs)
    return packages
# Discover the Python packages and the static data payload to install.
packages = get_packages('lophinet')
data_files = get_data_files("tftpboot")
data_files += get_data_files("conf")
data_files += get_data_files("bin")
# Ensure the (initially empty) samba images directory is created on install.
data_files += [(os.path.join(DIR_ROOT, 'samba', 'images'), [])]
setup(name='LO-PHI-Net-Services',
      version='1.0',
      description='This contains the LO-PHI Network Services binaries and '
                  'configuration files that includes the following services: '
                  'DNS, DHCP/PXE, TFTP, and a LO-PHI Control service.',
      author='Chad Spensky and Hongyi Hu',
      author_email='lophi@mit.edu',
      packages=packages,
      data_files=data_files
      )
| {
"content_hash": "4e21004011a84ae1fd3bcbbf3758178b",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 27.083333333333332,
"alnum_prop": 0.5981538461538461,
"repo_name": "mit-ll/LO-PHI",
"id": "83c08bd666eb4239a2251823f713b8428c63f694",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lophi-net-services/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "58723"
},
{
"name": "Elixir",
"bytes": "18208"
},
{
"name": "Emacs Lisp",
"bytes": "1368"
},
{
"name": "Groff",
"bytes": "1900"
},
{
"name": "M4",
"bytes": "2284"
},
{
"name": "Makefile",
"bytes": "64810"
},
{
"name": "Protocol Buffer",
"bytes": "1803"
},
{
"name": "Python",
"bytes": "1220515"
},
{
"name": "Shell",
"bytes": "23976"
}
],
"symlink_target": ""
} |
import sys
from plenum.common.constants import PREPREPARE
from plenum.test.delayers import ppDelay, msg_rep_delay
from plenum.test.helper import sdk_send_random_and_check, \
sdk_send_random_request
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.stasher import delay_rules
from plenum.test.test_node import ensureElectionsDone
from plenum.test.view_change.helper import check_prepare_certificate
from plenum.test.view_change_service.helper import trigger_view_change
from stp_core.loop.eventually import eventually
def test_view_change_with_different_prepare_certificate(looper, txnPoolNodeSet,
                                                        sdk_pool_handle,
                                                        sdk_wallet_client):
    """
    Check that a node without pre-prepare but with quorum of prepares wouldn't
    use this transaction as a last in prepare certificate
    """
    # Order a baseline transaction before slowing anything down.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    slow_node = txnPoolNodeSet[-1]
    # delay preprepares and message response with preprepares.
    with delay_rules(slow_node.nodeIbStasher, ppDelay(delay=sys.maxsize)):
        with delay_rules(slow_node.nodeIbStasher,
                         msg_rep_delay(delay=sys.maxsize,
                                       types_to_delay=[PREPREPARE, ])):
            last_ordered = slow_node.master_replica.last_ordered_3pc
            sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
            # All fast nodes reach a prepare certificate for the new request
            # while the slow node still lacks the corresponding PRE-PREPARE.
            looper.run(eventually(check_prepare_certificate,
                                  txnPoolNodeSet[0:-1],
                                  last_ordered[1] + 1))
            trigger_view_change(txnPoolNodeSet)
            # The slow node must not extend its certificate past what it
            # actually ordered, despite seeing a quorum of prepares.
            assert slow_node.master_replica._ordering_service.l_last_prepared_certificate_in_view() == \
                   (0, last_ordered[1])
    ensureElectionsDone(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    # The pool must remain usable after the view change completes.
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
| {
"content_hash": "52f57243c94da649ad8b0de580c9886f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 104,
"avg_line_length": 51.627906976744185,
"alnum_prop": 0.6563063063063063,
"repo_name": "evernym/plenum",
"id": "1d8a950fef7bc7213de6d0e715ea0c7707ebf7d0",
"size": "2220",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/view_change_with_delays/test_view_change_with_different_prepare_certificate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1537915"
}
],
"symlink_target": ""
} |
You are given an n x n 2D matrix representing an image.
Rotate the image by 90 degrees (clockwise).
Follow up:
Could you do this in-place?
class Solution:
    # @param matrix, a list of lists of integers
    # @return nothing (void), do not return anything, modify matrix in-place instead.
    def rotate(self, matrix):
        """Rotate the n x n ``matrix`` 90 degrees clockwise, in place.

        Transpose across the main diagonal, then reverse every row; the
        composition of those two reflections is a clockwise quarter turn.
        """
        n = len(matrix)
        # BUG FIX: xrange is Python-2-only; range behaves identically for
        # iteration on both Python 2 and 3.
        for i in range(n):
            # Note: the inner loop starts at i+1 so that each off-diagonal
            # pair is swapped exactly once (swapping twice would undo it).
            for j in range(i + 1, n):
                matrix[j][i], matrix[i][j] = matrix[i][j], matrix[j][i]
        for i in range(n):
            matrix[i].reverse()
# Refer: https://leetcode.com/discuss/20589/a-common-method-to-rotate-the-image
| {
"content_hash": "f412a9a294859a9b060e3cfa9e26c670",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 85,
"avg_line_length": 31.523809523809526,
"alnum_prop": 0.6057401812688822,
"repo_name": "UmassJin/Leetcode",
"id": "96321d859452fb16caf8b272e59cbc119f240baf",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Array/Rotate_Image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "717672"
}
],
"symlink_target": ""
} |
"""
WSGI config for amazondealsapi project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "amazondealsapi.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "amazondealsapi.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
# NOTE: this import is deliberately placed after the environment setup so the
# settings module is known before Django initialises.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "9e075db4e4aeabe0510adcd87a55545e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 45.09375,
"alnum_prop": 0.7962577962577962,
"repo_name": "Aameer/amazon_deals_api",
"id": "1b5b2621419bf206218879f278deeaf96c74d96a",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amazondealsapi/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3523"
},
{
"name": "Python",
"bytes": "11549"
}
],
"symlink_target": ""
} |
import errno
import hashlib
import io
import os
import posixpath
from contextlib import contextmanager
from contextlib import ExitStack
from contextlib import suppress
from ftplib import Error as FTPError
from inspect import cleandoc
from pathlib import Path
from subprocess import CalledProcessError
from subprocess import CompletedProcess
from subprocess import DEVNULL
from subprocess import PIPE
from subprocess import STDOUT
from typing import Any
from typing import Callable
from typing import ContextManager
from typing import Dict
from typing import Generator
from typing import Iterable
from typing import Iterator
from typing import Mapping
from typing import NoReturn
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from warnings import warn
from werkzeug import urls
from lektor.compat import TemporaryDirectory
from lektor.exception import LektorException
from lektor.utils import bool_from_string
from lektor.utils import locate_executable
from lektor.utils import portable_popen
if TYPE_CHECKING: # pragma: no cover
from _typeshed import StrOrBytesPath
from _typeshed import StrPath
from lektor.environment import Environment
@contextmanager
def _ssh_key_file(
    credentials: Optional[Mapping[str, str]]
) -> Iterator[Optional["StrPath"]]:
    """Yield a path to an SSH private key file, or ``None``.

    If the credentials carry an explicit ``key_file`` it is used as-is.
    Otherwise, inline key material under ``key`` (optionally prefixed with
    ``"TYPE:"``, default RSA) is written to a temporary PEM file that lives
    for the duration of the context.
    """
    creds = credentials or {}
    key_file: Optional["StrPath"] = creds.get("key_file")
    key = creds.get("key")
    with ExitStack() as stack:
        if key and not key_file:
            key_type = "RSA"
            if ":" in key:
                key_type, _, key = key.partition(":")
                key_type = key_type.upper()
            tmp_dir = stack.enter_context(TemporaryDirectory())
            key_file = Path(tmp_dir, "keyfile")
            with key_file.open("w", encoding="utf-8") as fp:
                fp.write(f"-----BEGIN {key_type} PRIVATE KEY-----\n")
                # PEM bodies are wrapped at 64 characters per line.
                fp.writelines(key[pos : pos + 64] + "\n"
                              for pos in range(0, len(key), 64))
                fp.write(f"-----END {key_type} PRIVATE KEY-----\n")
        yield key_file
@contextmanager
def _ssh_command(
    credentials: Optional[Mapping[str, str]], port: Optional[int] = None
) -> Iterator[Optional[str]]:
    """Yield a GIT_SSH_COMMAND-style ssh invocation, or ``None``.

    The command carries ``-p`` when a port is given and ``-i`` (with
    ``IdentitiesOnly``) when an SSH key is available; with neither option
    there is nothing to customise and ``None`` is yielded.
    """
    with _ssh_key_file(credentials) as key_file:
        options = []
        if port:
            options.append(f" -p {port}")
        if key_file:
            options.append(f' -i "{key_file}" -o IdentitiesOnly=yes')
        yield "ssh" + " ".join(options) if options else None
class PublishError(LektorException):
    """Raised by publishers if something goes wrong during deployment."""
class Command(ContextManager["Command"]):
    """A wrapper around subprocess.Popen to facilitate streaming output via generator.
    :param argline: Command with arguments to execute.
    :param cwd: Optional. Directory in which to execute command.
    :param env: Optional. Environment with which to run command.
    :param capture: Default `True`. Whether to capture stdout and stderr.
    :param silent: Default `False`. Discard output altogether.
    :param check: Default `False`.
        If set, raise ``CalledProcessError`` on non-zero return code.
    :param input: Optional. A string to feed to the subprocess via stdin.
    :param capture_stdout: Default `False`. Capture stdout and
        return in ``CompletedProcess.stdout``.
    Basic Usage
    ===========
    To run a command, returning any output on stdout or stderr to the caller
    as an iterable (generator), while checking the return code from the command:
        def run_command(argline):
            # This passes the output
            rv = yield from Command(argline)
            if rv.returncode != 0:
                raise RuntimeError("Command failed!")
    This could be called as follows:
        for outline in run_command(('ls')):
            print(outline.rstrip())
    Supplying input via stdin, Capturing stdout
    ===========================================
    The following example shows how input may be fed to a subprocess via stdin,
    and how stdout may be captured for further processing.
        def run_wc(input):
            rv = yield from Command(('wc'), check=True, input=input, capture_stdout=True)
            lines, words, chars = rv.stdout.split()
            print(f"{words} words, {chars} chars")
        stderr_lines = list(run_wc("a few words"))
        # prints "3 words, 11 chars"
    Note that ``check=True`` will cause a ``CalledProcessError`` to be raised if the
    ``wc`` subprocess returns a non-zero return code.
    """
    def __init__(
        self,
        argline: Iterable[str],
        cwd: Optional["StrOrBytesPath"] = None,
        env: Optional[Mapping[str, str]] = None,
        capture: bool = True,
        silent: bool = False,
        check: bool = False,
        input: Optional[str] = None,
        capture_stdout: bool = False,
    ) -> None:
        kwargs: Dict[str, Any] = {"cwd": cwd}
        if env:
            # Overlay the caller-supplied variables on the current environment.
            kwargs["env"] = {**os.environ, **env}
        if silent:
            kwargs["stdout"] = DEVNULL
            kwargs["stderr"] = DEVNULL
            capture = False
        if input is not None:
            kwargs["stdin"] = PIPE
        if capture or capture_stdout:
            kwargs["stdout"] = PIPE
        if capture:
            # Unless stdout is captured separately, merge stderr into stdout.
            kwargs["stderr"] = STDOUT if not capture_stdout else PIPE
        # Python >= 3.7 has sane encoding defaults in the case that the system is
        # (likely mis-)configured to use ASCII as the default encoding (PEP538).
        # It also provides a way for the user to force the use of UTF-8 (PEP540).
        kwargs["text"] = True
        kwargs["errors"] = "replace"
        self.capture = capture # b/c - unused
        self.check = check
        self._stdout = None
        with ExitStack() as stack:
            self._cmd = stack.enter_context(portable_popen(list(argline), **kwargs))
            # Hold the Popen cleanup callback so close() can run it exactly once.
            self._closer: Optional[Callable[[], None]] = stack.pop_all().close
        if input is not None or capture_stdout:
            self._output = self._communicate(input, capture_stdout, capture)
        elif capture:
            self._output = self._cmd.stdout
        else:
            self._output = None
    def _communicate(
        self, input: Optional[str], capture_stdout: bool, capture: bool
    ) -> Optional[Iterator[str]]:
        # Feed stdin / gather output, killing the child on interruption.
        proc = self._cmd
        try:
            if capture_stdout:
                self._stdout, errout = proc.communicate(input)
            else:
                errout, _ = proc.communicate(input)
        except BaseException:
            # Do not leave the child running if communicate() was interrupted.
            proc.kill()
            with suppress(CalledProcessError):
                self.close()
            raise
        if capture:
            return iter(errout.splitlines())
        return None
    def close(self) -> None:
        """Wait for subprocess to complete.
        If check=True was passed to the constructor, raises ``CalledProcessError``
        if the subprocess returns a non-zero status code.
        """
        closer, self._closer = self._closer, None
        if closer:
            # This waits for process and closes standard file descriptors
            closer()
        if self.check:
            rc = self._cmd.poll()
            if rc != 0:
                raise CalledProcessError(rc, self._cmd.args, self._stdout)
    def wait(self) -> int:
        """Wait for subprocess to complete. Return status code."""
        self._cmd.wait()
        self.close()
        return self._cmd.returncode
    def result(self) -> "CompletedProcess[str]":
        """Wait for subprocess to complete. Return ``CompletedProcess`` instance.
        If ``capture_stdout=True`` was passed to the constructor, the output
        captured from stdout will be available on the ``.stdout`` attribute
        of the return value.
        """
        return CompletedProcess(self._cmd.args, self.wait(), self._stdout)
    @property
    def returncode(self) -> Optional[int]:
        """Return exit status of the subprocess.
        Or ``None`` if the subprocess is still alive.
        """
        return self._cmd.returncode
    def __exit__(self, *__: Any) -> None:
        self.close()
    def __iter__(self) -> Generator[str, None, "CompletedProcess[str]"]:
        """A generator with yields any captured output and returns a ``CompletedProcess``.
        If ``capture`` is ``True`` (the default). Both stdout and stderr are available
        in the iterator output.
        If ``capture_stdout`` is set, stdout is captured to a string which is made
        available via ``CompletedProcess.stdout`` attribute of the return value. Stderr
        output is available via the iterator output, as normal.
        """
        if self._output is None:
            raise RuntimeError("Not capturing")
        for line in self._output:
            yield line.rstrip()
        return self.result()
    safe_iter = __iter__ # b/c - deprecated
    @property
    def output(self) -> Iterator[str]: # b/c - deprecated
        return self.safe_iter()
class Publisher:
    """Base class for deployment backends.

    Concrete publishers implement :meth:`publish` as a generator that
    yields human-readable progress lines.
    """

    def __init__(self, env: "Environment", output_path: str) -> None:
        self.env = env
        self.output_path = os.path.abspath(output_path)

    def fail(self, message: str) -> NoReturn:
        """Abort the publish by raising a :class:`PublishError`."""
        # pylint: disable=no-self-use
        raise PublishError(message)

    def publish(
        self,
        target_url: urls.URL,
        credentials: Optional[Mapping[str, str]] = None,
        **extra: Any,
    ) -> Iterator[str]:
        raise NotImplementedError()
class RsyncPublisher(Publisher):
    """Publish the build output with ``rsync`` (optionally over SSH)."""

    @contextmanager
    def get_command(self, target_url, credentials):
        """Yield a ready-to-run rsync :class:`Command` for ``target_url``."""
        creds = credentials or {}
        options = target_url.decode_query()

        args = ["rsync", "-rclzv", "--exclude=.lektor"]
        for pattern in options.getlist("exclude"):
            args.extend(("--exclude", pattern))
        # Any "truthy" spelling of the delete option enables --delete-after.
        if options.get("delete", False) in ("", "on", "yes", "true", "1", None):
            args.append("--delete-after")

        with _ssh_command(creds, target_url.port) as ssh_command:
            if ssh_command:
                args.extend(("-e", ssh_command))
            destination = []
            user = creds.get("username") or target_url.username
            if user:
                destination.append(user + "@")
            if target_url.ascii_host is not None:
                destination.append(target_url.ascii_host)
                destination.append(":")
            destination.append(target_url.path.rstrip("/") + "/")
            args.append(self.output_path.rstrip("/\\") + "/")
            args.append("".join(destination))
            yield Command(args, env={})

    def publish(self, target_url, credentials=None, **extra):
        """Run rsync, streaming its output lines."""
        with self.get_command(target_url, credentials) as proc:
            yield from proc
class FtpConnection:
    """Thin wrapper around :class:`ftplib.FTP` that buffers log output.

    All operations append their status/errors (as strings) to
    ``log_buffer``; callers periodically flush it via :meth:`drain_log`
    so progress can be streamed to the user.
    """

    def __init__(self, url, credentials=None):
        credentials = credentials or {}
        self.con = self.make_connection()
        self.url = url
        # Explicit credentials win over credentials embedded in the URL.
        self.username = credentials.get("username") or url.username
        self.password = credentials.get("password") or url.password
        self.log_buffer = []
        self._known_folders = set()

    @staticmethod
    def make_connection():
        # pylint: disable=import-outside-toplevel
        from ftplib import FTP

        return FTP()

    def drain_log(self):
        """Yield and clear all buffered log lines."""
        log = self.log_buffer[:]
        del self.log_buffer[:]
        for chunk in log:
            for line in chunk.splitlines():
                if not isinstance(line, str):
                    line = line.decode("utf-8", "replace")
                yield line.rstrip()

    def connect(self):
        """Connect, authenticate and chdir to the target path.

        Returns True on success; failures are reported via the log buffer.
        """
        options = self.url.decode_query()
        log = self.log_buffer
        log.append("000 Connecting to server ...")
        try:
            log.append(self.con.connect(self.url.ascii_host, self.url.port or 21))
        except Exception as e:
            log.append("000 Could not connect.")
            log.append(str(e))
            return False
        try:
            credentials = {}
            if self.username:
                credentials["user"] = self.username
            if self.password:
                credentials["passwd"] = self.password
            log.append(self.con.login(**credentials))
        except Exception as e:
            log.append("000 Could not authenticate.")
            log.append(str(e))
            return False
        # Passive mode is on unless explicitly disabled in the query string.
        passive = options.get("passive") in ("on", "yes", "true", "1", None)
        log.append("000 Using passive mode: %s" % (passive and "yes" or "no"))
        self.con.set_pasv(passive)
        try:
            log.append(self.con.cwd(self.url.path))
        except Exception as e:
            log.append(str(e))
            return False
        log.append("000 Connected!")
        return True

    def mkdir(self, path, recursive=True):
        """Create a remote directory (and its parents when ``recursive``)."""
        if not isinstance(path, str):
            path = path.decode("utf-8")
        if path in self._known_folders:
            return
        dirname, _ = posixpath.split(path)
        if dirname and recursive:
            self.mkdir(dirname)
        try:
            self.con.mkd(path)
        except FTPError as e:
            msg = str(e)
            # 550 usually means the folder already exists; stay quiet then.
            if msg[:4] != "550 ":
                self.log_buffer.append(str(e))
                return
        self._known_folders.add(path)

    def append(self, filename, data):
        """Append text ``data`` to a remote file. Returns True on success."""
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        input = io.BytesIO(data.encode("utf-8"))
        try:
            self.con.storbinary("APPE " + filename, input)
        except FTPError as e:
            self.log_buffer.append(str(e))
            return False
        return True

    def get_file(self, filename, out=None):
        """Download a remote file; returns its text when ``out`` is omitted."""
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        getvalue = False
        if out is None:
            out = io.BytesIO()
            getvalue = True
        try:
            self.con.retrbinary("RETR " + filename, out.write)
        except FTPError as e:
            msg = str(e)
            if msg[:4] != "550 ":
                # BUG FIX: log the string form - drain_log() calls
                # .splitlines() on each entry, which exception objects lack.
                self.log_buffer.append(str(e))
            return None
        if getvalue:
            return out.getvalue().decode("utf-8")
        return out

    def upload_file(self, filename, src, mkdir=False):
        """Upload ``src`` (file object or str) to ``filename``."""
        if isinstance(src, str):
            src = io.BytesIO(src.encode("utf-8"))
        if mkdir:
            directory = posixpath.dirname(filename)
            if directory:
                self.mkdir(directory, recursive=True)
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        try:
            self.con.storbinary("STOR " + filename, src, blocksize=32768)
        except FTPError as e:
            self.log_buffer.append(str(e))
            return False
        return True

    def rename_file(self, src, dst):
        """Rename ``src`` to ``dst``, retrying once after deleting ``dst``."""
        try:
            self.con.rename(src, dst)
        except FTPError as e:
            self.log_buffer.append(str(e))
            # Some servers refuse to overwrite; delete the target and retry.
            try:
                self.con.delete(dst)
            except Exception as e:
                self.log_buffer.append(str(e))
            try:
                self.con.rename(src, dst)
            except Exception as e:
                self.log_buffer.append(str(e))

    def delete_file(self, filename):
        """Best-effort delete of a remote file."""
        # BUG FIX: ftplib on Python 3 requires str command arguments; the old
        # code encoded the name to bytes, making every delete fail silently.
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        try:
            self.con.delete(filename)
        except Exception as e:
            self.log_buffer.append(str(e))

    def delete_folder(self, filename):
        """Best-effort delete of a remote directory."""
        # BUG FIX: same str-vs-bytes issue as delete_file; using str also
        # makes the cache discard match the str keys stored by mkdir().
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        try:
            self.con.rmd(filename)
        except Exception as e:
            self.log_buffer.append(str(e))
        self._known_folders.discard(filename)
class FtpTlsConnection(FtpConnection):
    """FTP connection that upgrades the session to TLS."""

    @staticmethod
    def make_connection():
        # pylint: disable=import-outside-toplevel
        from ftplib import FTP_TLS

        return FTP_TLS()

    def connect(self):
        """Connect as usual, then switch the data channel to TLS."""
        ok = super().connect()
        if ok:
            # Upgrade data connection to TLS.
            self.con.prot_p()  # pylint: disable=no-member
        return ok
class FtpPublisher(Publisher):
    # Swapped for FtpTlsConnection by the TLS subclass below.
    connection_class = FtpConnection
    @staticmethod
    def read_existing_artifacts(con):
        # Parse the remote ".lektor/listing" file into a mapping of
        # {artifact_name: checksum} plus the set of duplicated names.
        contents = con.get_file(".lektor/listing")
        if not contents:
            return {}, set()
        duplicates = set()
        rv = {}
        # Later records override earlier ones. There can be duplicate
        # entries if the file was not compressed.
        for line in contents.splitlines():
            items = line.split("|")
            if len(items) == 2:
                if not isinstance(items[0], str):
                    artifact_name = items[0].decode("utf-8")
                else:
                    artifact_name = items[0]
                if artifact_name in rv:
                    duplicates.add(artifact_name)
                rv[artifact_name] = items[1]
        return rv, duplicates
    def iter_artifacts(self):
        """Iterates over all artifacts in the build folder and yields the
        artifacts.
        """
        for dirpath, dirnames, filenames in os.walk(self.output_path):
            # Prune ignored directories in place so os.walk skips them.
            dirnames[:] = [x for x in dirnames if not self.env.is_ignored_artifact(x)]
            for filename in filenames:
                if self.env.is_ignored_artifact(filename):
                    continue
                full_path = os.path.join(self.output_path, dirpath, filename)
                local_path = full_path[len(self.output_path) :].lstrip(os.path.sep)
                if os.path.altsep:
                    local_path = local_path.lstrip(os.path.altsep)
                # Checksum the artifact so unchanged files can be skipped.
                h = hashlib.sha1()
                try:
                    with open(full_path, "rb") as f:
                        while 1:
                            item = f.read(4096)
                            if not item:
                                break
                            h.update(item)
                except IOError as e:
                    # A file may vanish between walk and open; only that is ok.
                    if e.errno != errno.ENOENT:
                        raise
                yield (
                    local_path.replace(os.path.sep, "/"),
                    full_path,
                    h.hexdigest(),
                )
    @staticmethod
    def get_temp_filename(filename):
        # Upload to a hidden ".<name>.tmp" sibling, then rename into place.
        dirname, basename = posixpath.split(filename)
        return posixpath.join(dirname, "." + basename + ".tmp")
    def upload_artifact(self, con, artifact_name, source_file, checksum):
        with open(source_file, "rb") as source:
            tmp_dst = self.get_temp_filename(artifact_name)
            con.log_buffer.append("000 Updating %s" % artifact_name)
            # Upload under a temp name and rename for an atomic-ish replace.
            con.upload_file(tmp_dst, source, mkdir=True)
            con.rename_file(tmp_dst, artifact_name)
            con.append(".lektor/listing", "%s|%s\n" % (artifact_name, checksum))
    def consolidate_listing(self, con, current_artifacts):
        server_artifacts, duplicates = self.read_existing_artifacts(con)
        known_folders = set()
        for artifact_name in current_artifacts.keys():
            known_folders.add(posixpath.dirname(artifact_name))
        # Remove remote artifacts (and newly empty folders) that no longer
        # exist locally.
        for artifact_name, checksum in server_artifacts.items():
            if artifact_name not in current_artifacts:
                con.log_buffer.append("000 Deleting %s" % artifact_name)
                con.delete_file(artifact_name)
                folder = posixpath.dirname(artifact_name)
                if folder not in known_folders:
                    con.log_buffer.append("000 Deleting %s" % folder)
                    con.delete_folder(folder)
        # Rewrite the listing only when it drifted from the local state.
        if duplicates or server_artifacts != current_artifacts:
            listing = []
            for artifact_name, checksum in current_artifacts.items():
                listing.append("%s|%s\n" % (artifact_name, checksum))
            listing.sort()
            con.upload_file(".lektor/.listing.tmp", "".join(listing))
            con.rename_file(".lektor/.listing.tmp", ".lektor/listing")
    def publish(self, target_url, credentials=None, **extra):
        con = self.connection_class(target_url, credentials)
        connected = con.connect()
        for event in con.drain_log():
            yield event
        if not connected:
            return
        yield "000 Reading server state ..."
        con.mkdir(".lektor")
        committed_artifacts, _ = self.read_existing_artifacts(con)
        for event in con.drain_log():
            yield event
        yield "000 Begin sync ..."
        current_artifacts = {}
        for artifact_name, filename, checksum in self.iter_artifacts():
            current_artifacts[artifact_name] = checksum
            # Only upload artifacts whose checksum changed since last deploy.
            if checksum != committed_artifacts.get(artifact_name):
                self.upload_artifact(con, artifact_name, filename, checksum)
                for event in con.drain_log():
                    yield event
        yield "000 Sync done!"
        yield "000 Consolidating server state ..."
        self.consolidate_listing(con, current_artifacts)
        for event in con.drain_log():
            yield event
        yield "000 All done!"
class FtpTlsPublisher(FtpPublisher):
    """FTP publisher variant that talks to the server over FTP-over-TLS."""

    connection_class = FtpTlsConnection
class GitRepo(ContextManager["GitRepo"]):
"""A temporary git repository.
This class provides some lower-level utility methods which may be
externally useful, but the main use case is:
def publish(html_output):
gitrepo = GitRepo(html_output)
yield from gitrepo.publish_ghpages(
push_url="git@github.com:owner/repo.git",
branch="gh-pages"
)
:param work_tree: The work tree for the repository.
"""
    def __init__(self, work_tree: "StrPath") -> None:
        environ = {**os.environ, "GIT_WORK_TREE": str(work_tree)}
        # Ensure author/committer identity is always set so commits cannot
        # fail on machines without a git config; values already present in
        # the environment are preferred over the defaults.
        for what, default in [("NAME", "Lektor Bot"), ("EMAIL", "bot@getlektor.com")]:
            value = (
                environ.get(f"GIT_AUTHOR_{what}")
                or environ.get(f"GIT_COMMITTER_{what}")
                or default
            )
            for key in f"GIT_AUTHOR_{what}", f"GIT_COMMITTER_{what}":
                environ[key] = environ.get(key) or value
        with ExitStack() as stack:
            # The bare repository lives in a temp dir cleaned up on exit.
            environ["GIT_DIR"] = stack.enter_context(TemporaryDirectory(suffix=".git"))
            self.environ = environ
            self.run("init", "--quiet")
            # Transfer cleanup responsibility from this scope to __exit__.
            self._exit_stack = stack.pop_all()
    def __exit__(self, *__: Any) -> None:
        # Removes the temporary GIT_DIR (and any key file) set up earlier.
        self._exit_stack.close()
def _popen(self, args: Sequence[str], **kwargs: Any) -> Command:
cmd = ["git"]
cmd.extend(args)
return Command(cmd, env=self.environ, **kwargs)
def popen(
self,
*args: str,
check: bool = True,
input: Optional[str] = None,
capture_stdout: bool = False,
) -> Command:
"""Run a git subcommand."""
return self._popen(
args, check=check, input=input, capture_stdout=capture_stdout
)
def run(
self,
*args: str,
check: bool = True,
input: Optional[str] = None,
capture_stdout: bool = False,
) -> "CompletedProcess[str]":
"""Run a git subcommand and wait for completion."""
return self._popen(
args, check=check, input=input, capture_stdout=capture_stdout, capture=False
).result()
def set_ssh_credentials(self, credentials: Mapping[str, str]) -> None:
"""Set up git ssh credentials.
This repository will be configured to used whatever SSH credentials
can found in ``credentials`` (if any).
"""
stack = self._exit_stack
ssh_command = stack.enter_context(_ssh_command(credentials))
if ssh_command:
self.environ.setdefault("GIT_SSH_COMMAND", ssh_command)
def set_https_credentials(self, credentials: Mapping[str, str]) -> None:
"""Set up git http(s) credentials.
This repository will be configured to used whatever HTTP credentials
can found in ``credentials`` (if any).
"""
username = credentials.get("username", "")
password = credentials.get("password")
if username or password:
userpass = f"{username}:{password}" if password else username
git_dir = self.environ["GIT_DIR"]
cred_file = Path(git_dir, "lektor_cred_file")
# pylint: disable=unspecified-encoding
cred_file.write_text(f"https://{userpass}@github.com\n")
self.run("config", "credential.helper", f'store --file "{cred_file}"')
def add_to_index(self, filename: str, content: str) -> None:
"""Create a file in the index.
This creates file named ``filename`` with content ``content`` in the git
index.
"""
oid = self.run(
"hash-object", "-w", "--stdin", input=content, capture_stdout=True
).stdout.strip()
self.run("update-index", "--add", "--cacheinfo", "100644", oid, filename)
def publish_ghpages(
self,
push_url: str,
branch: str,
cname: Optional[str] = None,
preserve_history: bool = True,
) -> Iterator[str]:
"""Publish the contents of the work tree to GitHub pages.
:param push_url: The URL to push to.
:param branch: The branch to push to
:param cname: Optional. Create a top-level ``CNAME`` with given contents.
"""
refspec = f"refs/heads/{branch}"
if preserve_history:
yield "Fetching existing head"
fetch_cmd = self.popen("fetch", "--depth=1", push_url, refspec, check=False)
yield from _prefix_output(fetch_cmd)
if fetch_cmd.returncode == 0:
# If fetch was succesful, reset HEAD to remote head
yield from _prefix_output(self.popen("reset", "--soft", "FETCH_HEAD"))
else:
# otherwise assume remote branch does not exist
yield f"Creating new branch {branch}"
# At this point, the index is still empty. Add all but .lektor dir to index
yield from _prefix_output(
self.popen("add", "--force", "--all", "--", ".", ":(exclude).lektor")
)
if cname is not None:
self.add_to_index("CNAME", f"{cname}\n")
# Check for changes
diff_cmd = self.popen("diff", "--cached", "--exit-code", "--quiet", check=False)
yield from _prefix_output(diff_cmd)
if diff_cmd.returncode == 0:
yield "No changes to publish☺"
elif diff_cmd.returncode == 1:
yield "Creating commit"
yield from _prefix_output(
self.popen("commit", "--quiet", "--message", "Synchronized build")
)
push_cmd = ["push", push_url, f"HEAD:{refspec}"]
if not preserve_history:
push_cmd.insert(1, "--force")
yield "Pushing to github"
yield from _prefix_output(self.popen(*push_cmd))
yield "Success!"
else:
diff_cmd.result().check_returncode() # raise error
def _prefix_output(lines: Iterable[str], prefix: str = "> ") -> Iterator[str]:
"""Add prefix to lines."""
return (f"{prefix}{line}" for line in lines)
class GithubPagesPublisher(Publisher):
    """Publish to GitHub pages."""

    def publish(
        self,
        target_url: urls.URL,
        credentials: Optional[Mapping[str, str]] = None,
        **extra: Any,
    ) -> Iterator[str]:
        """Push the build output to a GitHub pages branch.

        Yields human-readable progress messages.

        :param target_url: The ``ghpages[+https|+ssh]://owner/project`` URL.
        :param credentials: Optional username/password overrides.
        """
        if not locate_executable("git"):
            self.fail("git executable not found; cannot deploy.")
        push_url, branch, cname, preserve_history, warnings = self._parse_url(
            target_url
        )
        creds = self._parse_credentials(credentials, target_url)
        yield from iter(warnings)
        with GitRepo(self.output_path) as repo:
            # Select the credential mechanism matching the push transport.
            if push_url.startswith("https:"):
                repo.set_https_credentials(creds)
            else:
                repo.set_ssh_credentials(creds)
            yield from repo.publish_ghpages(push_url, branch, cname, preserve_history)

    def _parse_url(
        self, target_url: urls.URL
    ) -> Tuple[str, str, Optional[str], bool, Sequence[str]]:
        """Derive (push_url, branch, cname, preserve_history, warnings)
        from the deploy target URL.

        Raises via ``self.fail`` when owner/project are missing or a
        non-standard port is requested.
        """
        if not target_url.host:
            self.fail("github owner missing from target URL")
        gh_owner = target_url.host.lower()
        gh_project = target_url.path.strip("/").lower()
        if not gh_project:
            self.fail("github project missing from target URL")

        params = target_url.decode_query()
        cname = params.get("cname")
        branch = params.get("branch")
        preserve_history = bool_from_string(params.get("preserve_history"), True)

        warnings = []
        if not branch:
            # No explicit branch requested: fall back to historical defaults,
            # warning for user-pages repos where GitHub's own default moved
            # to 'main'.
            if gh_project == f"{gh_owner}.github.io":
                warnings.extend(
                    cleandoc(self._EXPLICIT_BRANCH_SUGGESTED_MSG).splitlines()
                )
                warn(
                    " ".join(
                        cleandoc(self._DEFAULT_BRANCH_DEPRECATION_MSG).splitlines()
                    ),
                    category=DeprecationWarning,
                )
                branch = "master"
            else:
                branch = "gh-pages"

        if target_url.scheme in ("ghpages", "ghpages+ssh"):
            push_url = f"ssh://git@github.com/{gh_owner}/{gh_project}.git"
            default_port = 22
        else:
            push_url = f"https://github.com/{gh_owner}/{gh_project}.git"
            default_port = 443
        if target_url.port and target_url.port != default_port:
            self.fail("github does not support pushing to non-standard ports")

        return push_url, branch, cname, preserve_history, warnings

    # NOTE: grammar of these user-facing messages fixed ("will be changed",
    # "will be GitHub's new default", "It is suggested").
    _EXPLICIT_BRANCH_SUGGESTED_MSG = """
        ================================================================
        WARNING!!! You should explicitly set the name of the published
        branch of your GitHub pages repository.
        The default branch for new GitHub pages repositories has changed
        to 'main', but Lektor still defaults to the old value, 'master'.
        In a future version of Lektor, the default branch name will be
        changed to match the new GitHub default.
        For details, see
        https://getlektor.com/docs/deployment/ghpages/#pushing-to-an-explicit-branch
        ================================================================
        """

    _DEFAULT_BRANCH_DEPRECATION_MSG = """
        Currently, by default, Lektor pushes to the 'master' branch when
        deploying to GitHub pages repositories. In a future version of
        Lektor, the default branch will be GitHub's new default, 'main'.
        It is suggested that you explicitly set which branch to push to.
        """

    @staticmethod
    def _parse_credentials(
        credentials: Optional[Mapping[str, str]], target_url: urls.URL
    ) -> Mapping[str, str]:
        """Merge explicit credentials with any username/password embedded
        in the target URL; explicitly passed values win."""
        creds = dict(credentials or {})
        # Fill in default username/password from target url
        for key, default in [
            ("username", target_url.username),
            ("password", target_url.password),
        ]:
            if not creds.get(key) and default:
                creds[key] = default
        return creds
# Mapping of deploy-URL scheme to the Publisher implementation handling it
# (looked up via env.publishers in publish() below).
builtin_publishers = {
    "rsync": RsyncPublisher,
    "ftp": FtpPublisher,
    "ftps": FtpTlsPublisher,
    "ghpages": GithubPagesPublisher,
    "ghpages+https": GithubPagesPublisher,
    "ghpages+ssh": GithubPagesPublisher,
}
def publish(env, target, output_path, credentials=None, **extra):
    """Publish *output_path* to *target* using the publisher registered
    for the target URL's scheme.

    :param env: The environment; its ``publishers`` mapping resolves schemes.
    :param target: The deploy target URL (anything ``str()``-able).
    :param output_path: Directory containing the build output to publish.
    :param credentials: Optional credential mapping forwarded to the publisher.
    :raises PublishError: if no publisher is registered for the scheme.
    :return: The publisher's event iterator.
    """
    url = urls.url_parse(str(target))
    publisher = env.publishers.get(url.scheme)
    if publisher is None:
        # f-string for consistency with the rest of this module
        # (renders the identical message as the old %-format).
        raise PublishError(f'"{url.scheme}" is an unknown scheme.')
    return publisher(env, output_path).publish(url, credentials, **extra)
| {
"content_hash": "98e94f5a0af6794f6d516ea9eb1074e1",
"timestamp": "",
"source": "github",
"line_count": 908,
"max_line_length": 90,
"avg_line_length": 35.41189427312775,
"alnum_prop": 0.5743608882254152,
"repo_name": "lektor/lektor",
"id": "e9344304eb59474c48ad3a7706f665edb69c61c9",
"size": "32156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lektor/publisher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2475"
},
{
"name": "HTML",
"bytes": "29220"
},
{
"name": "JavaScript",
"bytes": "1480"
},
{
"name": "Makefile",
"bytes": "1101"
},
{
"name": "Python",
"bytes": "721680"
},
{
"name": "SCSS",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "145010"
}
],
"symlink_target": ""
} |
import json
import sys
def get_root(body):
    """Return the part flagged as root, or None when no part is the root."""
    return next((part for part in body["part"] if part["root"]), None)
def get_part(id_, body):
    """Return the part whose "id" equals *id_*, or None when absent."""
    return next((part for part in body["part"] if part["id"] == id_), None)
def fix_param(val):
    """Round *val* to four decimal places."""
    scaled = round(val * 10000)
    return scaled / 10000.0
def write_part(output, part, body, indentation_level=0):
    """Write one part as a tab-indented line, then recurse into children.

    Line format: <src-slot> <type> <id> <orientation> <color> [params...]
    The root part writes "0" in place of a source slot.
    """
    for _ in range(indentation_level):
        output.write("\t")
    if part["root"]:
        output.write("0")
    else:
        # Find the connection that feeds this part and emit its source slot.
        for connection in body["connection"]:
            if connection["dest"] == part["id"]:
                # need to fix slot numbering
                # see lines 182-186 in PartRepresentation
                if get_part(connection["src"],
                            body)["type"] == "CoreComponent":
                    output.write(str(connection["srcSlot"]))
                else:
                    output.write(str(connection["srcSlot"] - 1))
    output.write(" ")
    output.write(part["type"])
    output.write(" ")
    output.write(str(part["id"]))
    output.write(" ")
    output.write(str(part["orientation"]))
    output.write(" ")
    output.write(str(part["color"]))
    if "evolvableParam" in part:
        for param in part["evolvableParam"]:
            output.write(" ")
            output.write(str(fix_param(param["paramValue"])))
    output.write("\n")
    # Depth-first recursion: children are parts this one connects to.
    if "connection" in body:
        for connection in body["connection"]:
            if connection["src"] == part["id"]:
                write_part(output, get_part(connection["dest"], body),
                           body, indentation_level+1)
def write_body(output, body):
    # Serialize the whole part tree, starting from the root part.
    write_part(output, get_root(body), body)
def write_brain(output, brain):
    """Write the neural-net section: one connection per line, a blank line,
    then one non-input neuron per line.

    "src"/"dest"/"id" values are "<part>-<index>" strings; the dash is
    split so part and index become separate space-delimited fields.
    """
    if "connection" in brain:  # if no motors then no connections
        for connection in brain["connection"]:
            # print connection["weight"]
            output.write(connection["src"].split("-")[0])
            output.write(" ")
            output.write(connection["src"].split("-")[1])
            output.write(" ")
            output.write(connection["dest"].split("-")[0])
            output.write(" ")
            output.write(connection["dest"].split("-")[1])
            output.write(" ")
            output.write(str(connection["weight"]))
            output.write("\n")
        # Blank separator line between connections and neurons.
        output.write("\n")
    if "neuron" in brain:
        for neuron in brain["neuron"]:
            if neuron["layer"] != "input":
                output.write(neuron["id"].split("-")[0])
                output.write(" ")
                output.write(neuron["id"].split("-")[1])
                output.write(" ")
                if neuron["type"] == "oscillator":
                    # Oscillators carry period/phase/gain instead of a bias.
                    output.write("oscillator ")
                    output.write(str(neuron["period"]))
                    output.write(" ")
                    output.write(str(neuron["phaseOffset"]))
                    output.write(" ")
                    output.write(str(neuron["gain"]))
                else:
                    output.write(str(neuron["bias"]))
                output.write("\n")
if __name__ == "__main__":
    # Require an input JSON robot description and an output text path.
    if len(sys.argv) < 3:
        # print() call form works on both Python 2 and 3 for a single arg
        # (the original `print "..."` statement was Python-2 only).
        print("Usage: python json_converter.py input.json output.txt")
        sys.exit(1)  # usage error: exit with a nonzero status
    # `with` closes both files deterministically (the handles used to leak).
    with open(sys.argv[1], "r") as json_file:
        robot = json.load(json_file)
    with open(sys.argv[2], "w") as output:
        write_body(output, robot["body"])
        output.write("\n\n")
        # if "brain" in robot:
        #     write_brain(output, robot["brain"])
| {
"content_hash": "9843b4b5497fa2be378edf405071581e",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 70,
"avg_line_length": 32.632075471698116,
"alnum_prop": 0.5096848800231281,
"repo_name": "portaloffreedom/robot-baby",
"id": "812de8282603c615bfa7ada81e4f235b8f5fe8aa",
"size": "3459",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "Mating/mating/test/server/json_converter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "21463"
},
{
"name": "C++",
"bytes": "875246"
},
{
"name": "CMake",
"bytes": "4145"
},
{
"name": "Makefile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "45586"
},
{
"name": "TeX",
"bytes": "2972"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class SasDefinitionCreateParameters(Model):
    """The SAS definition create parameters.

    :param parameters: Sas definition creation metadata in the form of
     key-value pairs.
    :type parameters: dict
    :param sas_definition_attributes: The attributes of the SAS definition.
    :type sas_definition_attributes: :class:`SasDefinitionAttributes
     <azure.keyvault.models.SasDefinitionAttributes>`
    :param tags: Application specific metadata in the form of key-value pairs.
    :type tags: dict
    """

    # msrest validation rules: 'parameters' must be supplied.
    _validation = {
        'parameters': {'required': True},
    }

    # Maps attribute names to their wire keys and msrest type specs.
    _attribute_map = {
        'parameters': {'key': 'parameters', 'type': '{str}'},
        'sas_definition_attributes': {'key': 'attributes', 'type': 'SasDefinitionAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, parameters, sas_definition_attributes=None, tags=None):
        # Initialize the msrest Model base so its (de)serialization state
        # is set up (the super call was previously missing).
        super(SasDefinitionCreateParameters, self).__init__()
        self.parameters = parameters
        self.sas_definition_attributes = sas_definition_attributes
        self.tags = tags
| {
"content_hash": "7690b111820fa525e7ea5bd88511e7ed",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 94,
"avg_line_length": 35.766666666666666,
"alnum_prop": 0.6672879776328052,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "a8e6cd3e7b7c1f82f6e32f901f26bd653f0050f1",
"size": "1547",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "azure-keyvault/azure/keyvault/models/sas_definition_create_parameters.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from util import PySettings
class Config(PySettings):
    """Persistent application settings with their documentation strings."""

    FONT = 'sans 16'
    FONT_DOC = 'Font used in text entries'
    KEYBOARD = 'n130'
    KEYBOARD_DOC = 'Keyboard model and zones layout. One of (n130, n130_sdfv, n130_dvp)'
    TUTOR = 'en.basic'
    TUTOR_DOC = 'Tutor generator. One of (en.basic, en.advanced, ru.basic)'
    FILE = None
    FILE_DOC = 'Last opened file with words'
    RECENT_FILES = None
    RECENT_FILES_DOC = 'Last opened file list'
    FILE2TUTOR = None
    FILE2TUTOR_DOC = 'Map which stores last tutor used for file'

    def _add_recent_file(self, filename, limit=5):
        """Move *filename* to the front of the MRU list, capped at *limit*."""
        if 'RECENT_FILES' in self:
            recent = self['RECENT_FILES']
        else:
            recent = self['RECENT_FILES'] = []
        if filename in recent:
            recent.remove(filename)
        recent.insert(0, filename)
        # Truncate in place so the stored list object is reused.
        recent[:] = recent[:limit]

    def _set_tutor_for_file(self, filename, tutor):
        """Remember which tutor was last used for *filename*."""
        if 'FILE2TUTOR' not in self:
            self['FILE2TUTOR'] = {}
        self['FILE2TUTOR'][filename] = tutor

    def _get_tutor_for_file(self, filename, default):
        """Return the tutor last used for *filename*, else *default*."""
        if 'FILE2TUTOR' not in self:
            return default
        return self['FILE2TUTOR'].get(filename, default)
"content_hash": "047fb47cc247eb26d96651d4eb273fc9",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 88,
"avg_line_length": 26.541666666666668,
"alnum_prop": 0.5816326530612245,
"repo_name": "baverman/typetrainer",
"id": "c57f48c821ffa4923367ce4f2bf02435a26fde03",
"size": "1274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "typetrainer/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54496"
},
{
"name": "Shell",
"bytes": "358"
}
],
"symlink_target": ""
} |
def extractLnfreeWordpressCom(item):
    '''
    Parser for 'lnfree.wordpress.com'
    '''
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Skip items with no volume/chapter info as well as preview posts.
    if "preview" in title.lower() or not (chp or vol):
        return None
    series_map = (
        ('Imouto sae ireba ii', 'Imouto sae ireba ii', 'translated'),
    )
    for tagname, name, tl_type in series_map:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
"content_hash": "d34b7dd21c0cb5b79b66ac93487f0cda",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 104,
"avg_line_length": 27.842105263157894,
"alnum_prop": 0.665406427221172,
"repo_name": "fake-name/ReadableWebProxy",
"id": "981a6592574c826b198f66efde935e8d6b9af0f7",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractLnfreeWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""
.. module:: constants
:platform: linux
:synopsis:
.. moduleauthor:: Paul Fanelli <paul.fanelli@gmail.com>
.. modulecreated:: 6/26/15
"""
from os.path import join
from planet_alignment import constants
from planet_alignment.utils import path
# Fixture directories resolved at import time via the shared path helpers.
TEST_ETC_DIR = path.get_test_etc_dir()
TEST_PLUGINS_DIR = path.get_plugins_dir()

# System-definition YAML fixtures.
TEST_SYSTEM_YAML = join(TEST_ETC_DIR, 'system.yaml')
TEST_SYSTEM2_YAML = join(TEST_ETC_DIR, 'system2.yaml')

# Files with intentionally wrong content/extension for error-path tests.
TEST_BAD_CONFIG_FILE = join(TEST_ETC_DIR, 'foo.py')
TEST_WRONG_PLUGIN_FILE_TYPE = join(TEST_ETC_DIR, 'foo.txt')

# Individual plugin fixtures plus the list combinations tests pass around.
TEST_PLUGIN_FOO = join(TEST_ETC_DIR, 'foo.py')
TEST_PLUGIN_BAR = join(TEST_ETC_DIR, 'bar.py')
TEST_PLUGIN_LIST_FOO_BAR = [
    TEST_PLUGIN_FOO,
    TEST_PLUGIN_BAR
]
TEST_PLUGIN_ALIGN1 = join(TEST_ETC_DIR, 'align1.py')
TEST_PLUGIN_LIST_ALIGN1 = [
    TEST_PLUGIN_ALIGN1
]
TEST_PLUGIN_ALIGN2 = join(TEST_ETC_DIR, 'align2.py')
TEST_PLUGIN_LIST_ALIGN2 = [
    TEST_PLUGIN_ALIGN2
]
TEST_PLUGIN_LIST_ALIGN1_ALIGN2 = [
    TEST_PLUGIN_ALIGN1,
    TEST_PLUGIN_ALIGN2
]
TEST_PLUGIN_BASE = join(TEST_PLUGINS_DIR, 'base.py')
TEST_PLUGIN_LIST_BASE = [
    TEST_PLUGIN_BASE
]

# Re-export of the package-wide default alignment tolerance (degrees).
TEST_DEFAULT_ALIGNMENT_DELTA_DEGREES = constants.DEFAULT_ALIGNMENT_DELTA_DEGREES
| {
"content_hash": "b1b92401f81ce897f63c33d1d76f8110",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 27.34090909090909,
"alnum_prop": 0.7098919368246052,
"repo_name": "paulfanelli/planet_alignment",
"id": "de11e924b5e69606de3a32d0ed0285830ee6666b",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "planet_alignment/test/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48726"
}
],
"symlink_target": ""
} |
import requests
import records
import passlib
# Smoke test: issue an HTTP GET and show the response status code.
r = requests.get("http://google.com")
print(r.status_code)
# Re-bind names to the imported modules themselves — this file only
# demonstrates which third-party packages the environment requires.
r = requests
r = records
p = passlib
"content_hash": "875c5e621e6ad2fab05378e063ffd223",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 37,
"avg_line_length": 4.410256410256411,
"alnum_prop": 0.6162790697674418,
"repo_name": "mikeckennedy/write-pythonic-code-demos",
"id": "a95e9a2260c01584494b0608dfa5b0c663f022ec",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/ch_06_packages/_03_what_do_you_require.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39297"
}
],
"symlink_target": ""
} |
"""
A Hyper-V Nova Compute driver.
"""
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import migrationops
from nova.virt.hyperv import rdpconsoleops
from nova.virt.hyperv import snapshotops
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class HyperVDriver(driver.ComputeDriver):
    """Nova compute driver for Hyper-V.

    Every public method is a thin pass-through to one of the per-concern
    "ops" helper objects created in __init__ (host, volume, VM, snapshot,
    live-migration, migration, RDP console).
    """

    def __init__(self, virtapi):
        super(HyperVDriver, self).__init__(virtapi)
        # One helper object per functional area.
        self._hostops = hostops.HostOps()
        self._volumeops = volumeops.VolumeOps()
        self._vmops = vmops.VMOps()
        self._snapshotops = snapshotops.SnapshotOps()
        self._livemigrationops = livemigrationops.LiveMigrationOps()
        self._migrationops = migrationops.MigrationOps()
        self._rdpconsoleops = rdpconsoleops.RDPConsoleOps()

    def init_host(self, host):
        # No host-level initialization needed for Hyper-V.
        pass

    def list_instances(self):
        return self._vmops.list_instances()

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)

    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        self._vmops.reboot(instance, network_info, reboot_type)

    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        self._vmops.destroy(instance, network_info, block_device_info,
                            destroy_disks)

    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        pass

    def get_info(self, instance):
        return self._vmops.get_info(instance)

    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        return self._volumeops.attach_volume(connection_info,
                                             instance['name'])

    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        return self._volumeops.detach_volume(connection_info,
                                             instance['name'])

    def get_volume_connector(self, instance):
        return self._volumeops.get_volume_connector(instance)

    def get_available_resource(self, nodename):
        return self._hostops.get_available_resource()

    def get_host_stats(self, refresh=False):
        return self._hostops.get_host_stats(refresh)

    def host_power_action(self, host, action):
        return self._hostops.host_power_action(host, action)

    def snapshot(self, context, instance, name, update_task_state):
        self._snapshotops.snapshot(context, instance, name, update_task_state)

    def pause(self, instance):
        self._vmops.pause(instance)

    def unpause(self, instance):
        self._vmops.unpause(instance)

    def suspend(self, instance):
        self._vmops.suspend(instance)

    def resume(self, context, instance, network_info, block_device_info=None):
        self._vmops.resume(instance)

    def power_off(self, instance):
        self._vmops.power_off(instance)

    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        self._vmops.power_on(instance)

    def live_migration(self, context, instance_ref, dest, post_method,
                       recover_method, block_migration=False,
                       migrate_data=None):
        self._livemigrationops.live_migration(context, instance_ref, dest,
                                              post_method, recover_method,
                                              block_migration, migrate_data)

    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info):
        # Rolling back simply tears the (partially created) instance down.
        self.destroy(context, instance, network_info, block_device_info)

    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk, migrate_data=None):
        self._livemigrationops.pre_live_migration(context, instance,
                                                  block_device_info,
                                                  network_info)

    def post_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info,
                                           block_migr=False,
                                           block_device_info=None):
        self._livemigrationops.post_live_migration_at_destination(ctxt,
                                                                  instance_ref,
                                                                  network_info,
                                                                  block_migr)

    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        return self._livemigrationops.check_can_live_migrate_destination(
            ctxt, instance_ref, src_compute_info, dst_compute_info,
            block_migration, disk_over_commit)

    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        self._livemigrationops.check_can_live_migrate_destination_cleanup(
            ctxt, dest_check_data)

    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        return self._livemigrationops.check_can_live_migrate_source(
            ctxt, instance_ref, dest_check_data)

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        msg = _("VIF plugging is not supported by the Hyper-V driver.")
        raise NotImplementedError(msg)

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        msg = _("VIF unplugging is not supported by the Hyper-V driver.")
        raise NotImplementedError(msg)

    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        # Firewall filtering is not implemented here; log only.
        LOG.debug(_("ensure_filtering_rules_for_instance called"),
                  instance=instance_ref)

    def unfilter_instance(self, instance, network_info):
        LOG.debug(_("unfilter_instance called"), instance=instance)

    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None):
        return self._migrationops.migrate_disk_and_power_off(context,
                                                             instance, dest,
                                                             flavor,
                                                             network_info,
                                                             block_device_info)

    def confirm_migration(self, migration, instance, network_info):
        self._migrationops.confirm_migration(migration, instance, network_info)

    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        self._migrationops.finish_revert_migration(context, instance,
                                                   network_info,
                                                   block_device_info, power_on)

    def rename_virtualmachine(self, context, instance):
        # Intentional stub: logs instead of performing a rename.
        LOG.info('Doesn\'t actually call the rename method')

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance=False,
                         block_device_info=None, power_on=True):
        self._migrationops.finish_migration(context, migration, instance,
                                            disk_info, network_info,
                                            image_meta, resize_instance,
                                            block_device_info, power_on)

    def get_host_ip_addr(self):
        return self._hostops.get_host_ip_addr()

    def get_rdp_console(self, context, instance):
        return self._rdpconsoleops.get_rdp_console(instance)
| {
"content_hash": "bd951e4bc6d346b0c18c490248bc0551",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 79,
"avg_line_length": 44.71212121212121,
"alnum_prop": 0.568056026205806,
"repo_name": "shhui/nova",
"id": "35a5118a833083ea2a4295133887bdf4722117ee",
"size": "9509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/hyperv/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "5874"
},
{
"name": "Diff",
"bytes": "23363"
},
{
"name": "Groff",
"bytes": "112"
},
{
"name": "Python",
"bytes": "13937011"
},
{
"name": "Shell",
"bytes": "48928"
},
{
"name": "Smarty",
"bytes": "595873"
}
],
"symlink_target": ""
} |
"""
sentry.permissions
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.conf import settings
from django.db.models import Q
from functools import wraps
from sentry import features
from sentry.models import OrganizationMemberType
from sentry.utils.cache import cached_for_request
class Permission(object):
    """A named permission with a human-readable label.

    Instances compare equal by their text representation, so a Permission
    compares equal to a plain string bearing the same name.
    """

    def __init__(self, name, label):
        self.name = name
        self.label = label

    def __unicode__(self):
        return self.name

    # six.text_type(...) resolves to str() on Python 3, which would not
    # call __unicode__; alias it so equality works on both interpreters.
    __str__ = __unicode__

    def __eq__(self, other):
        return six.text_type(self) == six.text_type(other)

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__.
        return not self.__eq__(other)

    def __hash__(self):
        # Defining __eq__ alone would make the class unhashable on
        # Python 3; hash consistently with equality.
        return hash(six.text_type(self))
class Permissions(object):
    """Namespace collecting the well-known Permission instances."""

    ADD_ORGANIZATION = Permission('add_organization', 'create new organizations')
    ADD_TEAM = Permission('add_team', 'create new teams')
    ADD_PROJECT = Permission('add_project', 'create new projects')
    ADD_MEMBER = Permission('add_organization_member', 'add an organization member')
def requires_login(func):
    """Decorator: run *func* only for an authenticated user, else False."""
    @wraps(func)
    def wrapped(user, *args, **kwargs):
        if user and user.is_authenticated():
            return func(user, *args, **kwargs)
        return False
    return wrapped
def is_organization_admin(user, organization):
    """Return True if *user* holds an ADMIN-or-better membership with
    global access in *organization*."""
    # an organization admin *must* have global access
    return organization.member_set.filter(
        user=user,
        type__lte=OrganizationMemberType.ADMIN,
        has_global_access=True,
    ).exists()
def is_team_admin(user, team):
    """Return True if *user* is an ADMIN-or-better member for *team*,
    either via global access or explicit team membership."""
    return team.organization.member_set.filter(
        Q(has_global_access=True) | Q(teams=team),
        user=user,
        type__lte=OrganizationMemberType.ADMIN,
    ).exists()
def is_project_admin(user, project):
    # Project admin rights are inherited from the owning team.
    return is_team_admin(user, project.team)
@cached_for_request
@requires_login
def can_create_teams(user, organization):
    """Whether *user* may create new teams in *organization*."""
    if user.is_superuser:
        return True
    # Org admins may create teams only when the feature is enabled.
    return (is_organization_admin(user, organization)
            and features.has('teams:create', organization, actor=user))
@cached_for_request
@requires_login
def can_create_projects(user, team):
    """Whether *user* may create new projects within *team*."""
    return True if user.is_superuser else is_team_admin(user, team)
@requires_login
def can_set_public_projects(user):
    """Whether *user* may toggle the ``public`` attribute of projects."""
    # Superusers always can; otherwise it is gated by a global setting.
    return True if user.is_superuser else settings.SENTRY_ALLOW_PUBLIC_PROJECTS
@requires_login
def can_manage_org(user, organization):
    """Whether *user* may manage *organization*."""
    return bool(user.is_superuser or is_organization_admin(user, organization))
@requires_login
def can_manage_team(user, team):
    """Whether *user* may manage *team* (org-wide or team-level rights)."""
    return bool(can_manage_org(user, team.organization)
                or is_team_admin(user, team))
@requires_login
def can_add_organization_member(user, organization):
    """Whether *user* may add members to *organization*."""
    # must be an owner of the team
    return True if user.is_superuser else is_organization_admin(user, organization)
@requires_login
def can_manage_organization_member(user, member, perm):
    """Whether *user* may act on *member* (``perm`` is currently unused)."""
    # permissions always take precedence
    if user.is_superuser:
        return True
    # must be an owner of the team
    return is_organization_admin(user, member.organization)
def can_edit_organization_member(user, member):
    # Thin wrapper naming the specific permission being checked.
    return can_manage_organization_member(user, member, 'edit_organization_member')
def can_remove_organization_member(user, member):
    # Thin wrapper naming the specific permission being checked.
    return can_manage_organization_member(user, member, 'remove_organization_member')
@requires_login
def can_remove_project(user, project):
    """Whether *user* may remove *project*; internal projects never can be."""
    if project.is_internal_project():
        return False
    return True if user.is_superuser else is_project_admin(user, project)
@requires_login
def can_add_project_key(user, project):
    """Whether *user* may add an API key to *project*."""
    return True if user.is_superuser else is_project_admin(user, project)
@requires_login
def can_edit_project_key(user, key):
    """Whether *user* may edit the given project *key*."""
    if user.is_superuser:
        return True
    return is_project_admin(user, key.project)
@requires_login
def can_remove_project_key(user, key):
    """Whether *user* may remove the given project *key*."""
    if user.is_superuser:
        return True
    return is_project_admin(user, key.project)
| {
"content_hash": "759efda116098e967e5f88943a4495fc",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 85,
"avg_line_length": 22.10091743119266,
"alnum_prop": 0.6737235367372354,
"repo_name": "1tush/sentry",
"id": "59e768398d43d095f5d01b928e1af9e3d227f3e4",
"size": "4818",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/permissions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "601218"
},
{
"name": "HTML",
"bytes": "324270"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6357668"
}
],
"symlink_target": ""
} |
"""
vtkImageExportToArray - a NumPy front-end to vtkImageExport
This class converts a VTK image to a numpy array. The output
array will always have 3 dimensions (or 4, if the image had
multiple scalar components).
To use this class, you must have numpy installed (http://numpy.scipy.org)
Methods
SetInputConnection(vtkAlgorithmOutput) -- connect to VTK image pipeline
SetInputData(vtkImageData) -- set an vtkImageData to export
GetArray() -- execute pipeline and return a numpy array
Methods from vtkImageExport
GetDataExtent()
GetDataSpacing()
GetDataOrigin()
"""
import umath
import numpy
from vtk import vtkImageExport
from vtk import vtkStreamingDemandDrivenPipeline
from vtk import VTK_SIGNED_CHAR
from vtk import VTK_UNSIGNED_CHAR
from vtk import VTK_SHORT
from vtk import VTK_UNSIGNED_SHORT
from vtk import VTK_INT
from vtk import VTK_UNSIGNED_INT
from vtk import VTK_LONG
from vtk import VTK_UNSIGNED_LONG
from vtk import VTK_FLOAT
from vtk import VTK_DOUBLE
class vtkImageExportToArray:
    """Export a VTK image to a numpy array.

    The array is 3D (z, y, x), or 4D with a trailing component axis when
    the image has multiple scalar components. See the module docstring
    for the public interface.
    """

    def __init__(self):
        self.__export = vtkImageExport()
        self.__ConvertUnsignedShortToInt = False

    # Mapping of VTK scalar type constants to numpy dtype characters.
    __typeDict = { VTK_SIGNED_CHAR:'b',
                   VTK_UNSIGNED_CHAR:'B',
                   VTK_SHORT:'h',
                   VTK_UNSIGNED_SHORT:'H',
                   VTK_INT:'i',
                   VTK_UNSIGNED_INT:'I',
                   VTK_FLOAT:'f',
                   VTK_DOUBLE:'d'}

    # Byte sizes of the supported VTK scalar types (kept for reference).
    __sizeDict = { VTK_SIGNED_CHAR:1,
                   VTK_UNSIGNED_CHAR:1,
                   VTK_SHORT:2,
                   VTK_UNSIGNED_SHORT:2,
                   VTK_INT:4,
                   VTK_UNSIGNED_INT:4,
                   VTK_FLOAT:4,
                   VTK_DOUBLE:8 }

    # convert unsigned shorts to ints, to avoid sign problems
    def SetConvertUnsignedShortToInt(self, yesno):
        self.__ConvertUnsignedShortToInt = yesno

    def GetConvertUnsignedShortToInt(self):
        return self.__ConvertUnsignedShortToInt

    def ConvertUnsignedShortToIntOn(self):
        self.__ConvertUnsignedShortToInt = True

    def ConvertUnsignedShortToIntOff(self):
        self.__ConvertUnsignedShortToInt = False

    # set the input
    def SetInputConnection(self, input):
        return self.__export.SetInputConnection(input)

    def SetInputData(self, input):
        return self.__export.SetInputData(input)

    def GetInput(self):
        return self.__export.GetInput()

    def GetArray(self):
        """Execute the pipeline and return the image data as a numpy array."""
        self.__export.Update()
        image = self.__export.GetInput()
        extent = image.GetExtent()
        # Locals renamed from `type`/`input` so builtins are not shadowed.
        scalar_type = image.GetScalarType()
        numComponents = image.GetNumberOfScalarComponents()
        # VTK extents are inclusive; array axes are ordered (z, y, x).
        dim = (extent[5]-extent[4]+1,
               extent[3]-extent[2]+1,
               extent[1]-extent[0]+1)
        if numComponents > 1:
            dim = dim + (numComponents,)
        imArray = numpy.zeros(dim, self.__typeDict[scalar_type])
        self.__export.Export(imArray)
        # convert unsigned short to int to avoid sign issues; use the
        # numpy ufunc instead of the fragile bare `umath` module.
        if scalar_type == VTK_UNSIGNED_SHORT and self.__ConvertUnsignedShortToInt:
            imArray = numpy.bitwise_and(imArray.astype('i'), 0xffff)
        return imArray

    def GetDataExtent(self):
        return self.__export.GetDataExtent()

    def GetDataSpacing(self):
        return self.__export.GetDataSpacing()

    def GetDataOrigin(self):
        return self.__export.GetDataOrigin()
| {
"content_hash": "8a203673ec931ffa0d4b249b04d3fcaa",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 77,
"avg_line_length": 29.50862068965517,
"alnum_prop": 0.6394975167981303,
"repo_name": "cjh1/VTK",
"id": "5891b415f19d757b273be4a13178c535db8c84dc",
"size": "3423",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Wrapping/Python/vtk/util/vtkImageExportToArray.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "C",
"bytes": "43136914"
},
{
"name": "C++",
"bytes": "53535381"
},
{
"name": "CSS",
"bytes": "7532"
},
{
"name": "Java",
"bytes": "132882"
},
{
"name": "Objective-C",
"bytes": "540710"
},
{
"name": "Pascal",
"bytes": "3255"
},
{
"name": "Perl",
"bytes": "177703"
},
{
"name": "Prolog",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "980726"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Tcl",
"bytes": "1890698"
}
],
"symlink_target": ""
} |
import sys
from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import XMLConverter, HTMLConverter, TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
from pdfminer.image import ImageWriter
# main
def main(argv):
    """Command-line driver: convert each PDF named on the command line into
    text, HTML, XML or a tag stream, chosen via -t or inferred from the -o
    output filename.

    Returns 100 on bad usage; otherwise returns None after writing output.
    """
    import getopt
    def usage():
        print ('usage: %s [-d] [-p pagenos] [-m maxpages] [-P password] [-o output] [-C] '
               '[-n] [-A] [-V] [-M char_margin] [-L line_margin] [-W word_margin] [-F boxes_flow] '
               '[-Y layout_mode] [-O output_dir] [-t text|html|xml|tag] [-c codec] [-s scale] file ...' % argv[0])
        return 100
    try:
        (opts, args) = getopt.getopt(argv[1:], 'dp:m:P:o:CnAVM:L:W:F:Y:O:t:c:s:')
    except getopt.GetoptError:
        return usage()
    if not args: return usage()
    # debug option
    debug = 0
    # input option
    password = ''
    pagenos = set()
    maxpages = 0
    # output option
    outfile = None
    outtype = None
    imagewriter = None
    layoutmode = 'normal'
    codec = 'utf-8'
    pageno = 1
    scale = 1
    caching = True
    showpageno = True
    laparams = LAParams()
    # NOTE: '-n' replaces laparams with None, so a later -A/-V/-M/-L/-W/-F
    # flag would raise AttributeError (pre-existing, order-dependent).
    for (k, v) in opts:
        if k == '-d': debug += 1
        elif k == '-p': pagenos.update( int(x)-1 for x in v.split(',') )
        elif k == '-m': maxpages = int(v)
        elif k == '-P': password = v
        elif k == '-o': outfile = v
        elif k == '-C': caching = False
        elif k == '-n': laparams = None
        elif k == '-A': laparams.all_texts = True
        elif k == '-V': laparams.detect_vertical = True
        elif k == '-M': laparams.char_margin = float(v)
        elif k == '-L': laparams.line_margin = float(v)
        elif k == '-W': laparams.word_margin = float(v)
        elif k == '-F': laparams.boxes_flow = float(v)
        elif k == '-Y': layoutmode = v
        elif k == '-O': imagewriter = ImageWriter(v)
        elif k == '-t': outtype = v
        elif k == '-c': codec = v
        elif k == '-s': scale = float(v)
    # propagate the debug level into all pipeline classes
    PDFDocument.debug = debug
    PDFParser.debug = debug
    CMapDB.debug = debug
    PDFResourceManager.debug = debug
    PDFPageInterpreter.debug = debug
    PDFDevice.debug = debug
    # when -t is absent, infer the output type from the output file extension
    rsrcmgr = PDFResourceManager(caching=caching)
    if not outtype:
        outtype = 'text'
        if outfile:
            if outfile.endswith('.htm') or outfile.endswith('.html'):
                outtype = 'html'
            elif outfile.endswith('.xml'):
                outtype = 'xml'
            elif outfile.endswith('.tag'):
                outtype = 'tag'
    if outfile:
        # BUG FIX: the Python-2-only builtin ``file`` was used here; ``open``
        # is equivalent on Python 2 and also works on Python 3.
        outfp = open(outfile, 'w')
    else:
        outfp = sys.stdout
    if outtype == 'text':
        device = TextConverter(rsrcmgr, outfp, codec=codec, laparams=laparams,
                               imagewriter=imagewriter)
    elif outtype == 'xml':
        device = XMLConverter(rsrcmgr, outfp, codec=codec, laparams=laparams,
                              imagewriter=imagewriter)
    elif outtype == 'html':
        device = HTMLConverter(rsrcmgr, outfp, codec=codec, scale=scale,
                               layoutmode=layoutmode, laparams=laparams,
                               imagewriter=imagewriter)
    elif outtype == 'tag':
        device = TagExtractor(rsrcmgr, outfp, codec=codec)
    else:
        return usage()
    for fname in args:
        fp = open(fname, 'rb')  # BUG FIX: ``file`` -> ``open`` (see above)
        process_pdf(rsrcmgr, device, fp, pagenos, maxpages=maxpages, password=password,
                    caching=caching, check_extractable=True)
        fp.close()
    device.close()
    outfp.close()
    return
if __name__ == '__main__': sys.exit(main(sys.argv))
| {
"content_hash": "77ca01a17aa181b86ca178042747f150",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 114,
"avg_line_length": 36.36538461538461,
"alnum_prop": 0.5674246430460074,
"repo_name": "sahat/cloudbucket",
"id": "2d233f95e2c9fb734fe63de307241566c013d614",
"size": "3805",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/pdf2txt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "460357"
},
{
"name": "JavaScript",
"bytes": "1689684"
},
{
"name": "Python",
"bytes": "497862"
}
],
"symlink_target": ""
} |
from ._clusters_operations import ClustersOperations
from ._configuration_operations import ConfigurationOperations
from ._namespaces_operations import NamespacesOperations
from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
from ._private_link_resources_operations import PrivateLinkResourcesOperations
from ._operations import Operations
from ._event_hubs_operations import EventHubsOperations
from ._disaster_recovery_configs_operations import DisasterRecoveryConfigsOperations
from ._consumer_groups_operations import ConsumerGroupsOperations
# Names re-exported as the public API of this operations package
# (one entry per ``_*_operations`` module imported above).
__all__ = [
    'ClustersOperations',
    'ConfigurationOperations',
    'NamespacesOperations',
    'PrivateEndpointConnectionsOperations',
    'PrivateLinkResourcesOperations',
    'Operations',
    'EventHubsOperations',
    'DisasterRecoveryConfigsOperations',
    'ConsumerGroupsOperations',
]
| {
"content_hash": "6a80dea650a0c6676201217a31f2d228",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 90,
"avg_line_length": 42.19047619047619,
"alnum_prop": 0.8261851015801355,
"repo_name": "Azure/azure-sdk-for-python",
"id": "11daeaa7c220bfc78b50abcadf34542074d239f6",
"size": "1354",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2021_06_01_preview/operations/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``EventBase.price_line`` and refresh ``part_of`` field options.

    Auto-generated Django migration: once applied to any database the
    operation list below should not be edited by hand.
    """
    dependencies = [
        ('icekit_events', '0004_eventbase_part_of'),
    ]
    operations = [
        migrations.AddField(
            model_name='eventbase',
            name='price_line',
            field=models.CharField(max_length=255, help_text=b'A one-line description of the price for this event, e.g. "$12 / $10 / $6"', blank=True),
        ),
        migrations.AlterField(
            model_name='eventbase',
            name='part_of',
            field=models.ForeignKey(help_text=b'If this event is part of another event, select it here.', blank=True, related_name='contained_events', null=True, to='icekit_events.EventBase'),
        ),
    ]
| {
"content_hash": "8591750039e2c5fb02082543d3a44a20",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 192,
"avg_line_length": 34.26086956521739,
"alnum_prop": 0.6154822335025381,
"repo_name": "ic-labs/django-icekit",
"id": "a7bda0b59eda2038978717c86cf357857a3ec8a6",
"size": "812",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "icekit_events/migrations/0005_auto_20161024_1742.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18019"
},
{
"name": "HTML",
"bytes": "92605"
},
{
"name": "JavaScript",
"bytes": "27803"
},
{
"name": "Python",
"bytes": "1476354"
},
{
"name": "Shell",
"bytes": "37850"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import csv
import pytz
from furl import furl
from datetime import datetime, timedelta
from django.db.models import Q
from django.views.defaults import page_not_found
from django.views.generic import FormView, DeleteView, ListView, TemplateView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from osf.models.user import OSFUser
from osf.models.node import Node, NodeLog
from osf.models.spam import SpamStatus
from framework.auth import get_user
from framework.auth.utils import impute_names
from framework.auth.core import generate_verification_key
from website.mailchimp_utils import subscribe_on_confirm
from website import search
from admin.base.views import GuidView
from osf.models.admin_log_entry import (
update_admin_log,
USER_2_FACTOR,
USER_EMAILED,
USER_REMOVED,
USER_RESTORED,
CONFIRM_SPAM,
REINDEX_ELASTIC,
)
from admin.users.serializers import serialize_user
from admin.users.forms import EmailResetForm, WorkshopForm, UserSearchForm, MergeUserForm
from admin.users.templatetags.user_extras import reverse_user
from website.settings import DOMAIN, OSF_SUPPORT_EMAIL
class UserDeleteView(PermissionRequiredMixin, DeleteView):
    """ Allow authorised admin user to remove/restore user
    Interface with OSF database. No admin models.
    """
    # delete() is a toggle: an active account (or an explicit spam takedown
    # signalled via kwargs['is_spam']) gets disabled; an already-disabled
    # account gets re-enabled.
    template_name = 'users/remove_user.html'
    context_object_name = 'user'
    object = None
    permission_required = 'osf.change_osfuser'
    raise_exception = True
    def delete(self, request, *args, **kwargs):
        try:
            user = self.get_object()
            if user.date_disabled is None or kwargs.get('is_spam'):
                # Disable path: deactivate and strip spam/ham bookkeeping tags.
                user.disable_account()
                user.is_registered = False
                if 'spam_flagged' in user.system_tags:
                    user.tags.through.objects.filter(tag__name='spam_flagged').delete()
                if 'ham_confirmed' in user.system_tags:
                    user.tags.through.objects.filter(tag__name='ham_confirmed').delete()
                if kwargs.get('is_spam') and 'spam_confirmed' not in user.system_tags:
                    user.add_system_tag('spam_confirmed')
                flag = USER_REMOVED
                message = 'User account {} disabled'.format(user.pk)
            else:
                # Restore path: clear disable markers, re-subscribe, mark ham.
                user.requested_deactivation = False
                user.date_disabled = None
                subscribe_on_confirm(user)
                user.is_registered = True
                user.tags.through.objects.filter(tag__name__in=['spam_flagged', 'spam_confirmed'], tag__system=True).delete()
                if 'ham_confirmed' not in user.system_tags:
                    user.add_system_tag('ham_confirmed')
                flag = USER_RESTORED
                message = 'User account {} reenabled'.format(user.pk)
            user.save()
        except AttributeError:
            # get_object() returned None for an unknown guid, so the first
            # attribute access above failed.
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        update_admin_log(
            user_id=self.request.user.id,
            object_id=user.pk,
            object_repr='User',
            message=message,
            action_flag=flag
        )
        return redirect(reverse_user(self.kwargs.get('guid')))
    def get_context_data(self, **kwargs):
        context = {}
        context.setdefault('guid', kwargs.get('object')._id)
        return super(UserDeleteView, self).get_context_data(**context)
    def get_object(self, queryset=None):
        # Returns None when the guid does not resolve; delete() relies on the
        # resulting AttributeError to produce a 404.
        return OSFUser.load(self.kwargs.get('guid'))
class SpamUserDeleteView(UserDeleteView):
    """Disable a spam account and confirm every eligible node as spam.

    Delegates the actual account disabling to UserDeleteView.delete with
    ``is_spam=True``.
    """
    template_name = 'users/remove_spam_user.html'

    def delete(self, request, *args, **kwargs):
        try:
            user = self.get_object()
        except AttributeError:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        if user:
            # Confirm spam on every non-registration node not already marked.
            suspect_nodes = (
                node for node in user.contributor_to
                if not node.is_registration and not node.is_spam
            )
            for node in suspect_nodes:
                node.confirm_spam(save=True)
                update_admin_log(
                    user_id=request.user.id,
                    object_id=node._id,
                    object_repr='Node',
                    message='Confirmed SPAM: {} when user {} marked as spam'.format(node._id, user._id),
                    action_flag=CONFIRM_SPAM
                )
            # Tell the parent delete() this is a spam takedown.
            kwargs.update({'is_spam': True})
        return super(SpamUserDeleteView, self).delete(request, *args, **kwargs)
class HamUserRestoreView(UserDeleteView):
    """Restore a wrongly-flagged (ham) account and un-spam its nodes.

    Delegates the account re-enabling to UserDeleteView.delete with
    ``is_spam=False``.
    """
    template_name = 'users/restore_ham_user.html'

    def delete(self, request, *args, **kwargs):
        try:
            user = self.get_object()
        except AttributeError:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        if user:
            # Confirm ham on every node previously marked spam.
            flagged_nodes = (node for node in user.contributor_to if node.is_spam)
            for node in flagged_nodes:
                node.confirm_ham(save=True)
                update_admin_log(
                    user_id=request.user.id,
                    object_id=node._id,
                    object_repr='Node',
                    message='Confirmed HAM: {} when user {} marked as ham'.format(node._id, user._id),
                    action_flag=CONFIRM_SPAM
                )
            # Signal the parent delete() to restore rather than remove.
            kwargs.update({'is_spam': False})
        return super(HamUserRestoreView, self).delete(request, *args, **kwargs)
class UserSpamList(PermissionRequiredMixin, ListView):
    """Paginated list of users carrying a spam-related system tag.

    Subclasses override ``SPAM_TAG`` (and ``template_name``) to select the
    flagged / confirmed-spam / confirmed-ham populations.
    """
    SPAM_TAG = 'spam_flagged'
    paginate_by = 25
    paginate_orphans = 1
    # BUG FIX: the stray '-' previously sat on context_object_name
    # ('-osfuser'), which is not a valid context variable name; the existing
    # ascending sort on date_disabled is kept unchanged.  context_object_name
    # is effectively unused here because get_context_data() below builds its
    # own context dict.
    ordering = 'date_disabled'
    context_object_name = 'osfuser'
    permission_required = ('osf.view_spam', 'osf.view_osfuser')
    raise_exception = True

    def get_queryset(self):
        """Users tagged with this view's SPAM_TAG, oldest-disabled first."""
        return OSFUser.objects.filter(tags__name=self.SPAM_TAG).order_by(self.ordering)

    def get_context_data(self, **kwargs):
        """Serialize the current page of users for the template."""
        query_set = kwargs.pop('object_list', self.object_list)
        page_size = self.get_paginate_by(query_set)
        paginator, page, query_set, is_paginated = self.paginate_queryset(
            query_set, page_size)
        return {
            'users': list(map(serialize_user, query_set)),
            'page': page,
        }
class UserFlaggedSpamList(UserSpamList, DeleteView):
    """List of users flagged as spam, with a bulk 'confirm spam' POST action."""
    SPAM_TAG = 'spam_flagged'
    template_name = 'users/flagged_spam_list.html'
    def delete(self, request, *args, **kwargs):
        # NOTE(review): ``get_perms`` is not a standard Django user method
        # (the stock check is ``has_perm``) — confirm OSFUser defines it,
        # otherwise this line raises AttributeError instead of denying.
        if not request.user.get_perms('osf.mark_spam'):
            raise PermissionDenied("You don't have permission to update this user's spam status.")
        # Every POSTed key except the CSRF token is treated as a user guid.
        user_ids = [
            uid for uid in request.POST.keys()
            if uid != 'csrfmiddlewaretoken'
        ]
        for uid in user_ids:
            user = OSFUser.load(uid)
            # Swap the 'flagged' tag for 'confirmed' and log the action.
            if 'spam_flagged' in user.system_tags:
                user.system_tags.remove('spam_flagged')
            user.add_system_tag('spam_confirmed')
            user.save()
            update_admin_log(
                user_id=self.request.user.id,
                object_id=uid,
                object_repr='User',
                message='Confirmed SPAM: {}'.format(uid),
                action_flag=CONFIRM_SPAM
            )
        return redirect('users:flagged-spam')
class UserKnownSpamList(UserSpamList):
    """List of users already confirmed as spam."""
    SPAM_TAG = 'spam_confirmed'
    template_name = 'users/known_spam_list.html'
class UserKnownHamList(UserSpamList):
    """List of users confirmed as ham; reuses the known-spam template."""
    SPAM_TAG = 'ham_confirmed'
    template_name = 'users/known_spam_list.html'
class User2FactorDeleteView(UserDeleteView):
    """Let an authorised admin strip two-factor auth from a user account.

    Works directly against the OSF database; no admin-side models involved.
    """
    template_name = 'users/remove_2_factor.html'

    def delete(self, request, *args, **kwargs):
        account = self.get_object()
        try:
            account.delete_addon('twofactor')
        except AttributeError:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        # Record the removal in the admin audit log.
        log_message = 'Removed 2 factor auth for user {}'.format(account.pk)
        update_admin_log(
            user_id=self.request.user.id,
            object_id=account.pk,
            object_repr='User',
            message=log_message,
            action_flag=USER_2_FACTOR
        )
        return redirect(reverse_user(self.kwargs.get('guid')))
class UserFormView(PermissionRequiredMixin, FormView):
    """User search form: resolve a guid, email, or name into a redirect."""
    template_name = 'users/search.html'
    object_type = 'osfuser'
    permission_required = 'osf.view_osfuser'
    raise_exception = True
    form_class = UserSearchForm

    def __init__(self, *args, **kwargs):
        self.redirect_url = None
        super(UserFormView, self).__init__(*args, **kwargs)

    def form_valid(self, form):
        cleaned = form.cleaned_data
        guid = cleaned['guid']
        name = cleaned['name']
        email = cleaned['email']
        if guid or email:
            # An email takes precedence: look up its owner's guid first.
            if email:
                try:
                    account = OSFUser.objects.filter(
                        Q(username=email) | Q(emails__address=email)
                    ).get()
                except OSFUser.DoesNotExist:
                    return page_not_found(
                        self.request,
                        AttributeError('User with email address {} not found.'.format(email))
                    )
                guid = account.guids.first()._id
            self.redirect_url = reverse('users:user', kwargs={'guid': guid})
        elif name:
            # Fall back to a fullname substring search page.
            self.redirect_url = reverse('users:search_list', kwargs={'name': name})
        return super(UserFormView, self).form_valid(form)

    @property
    def success_url(self):
        return self.redirect_url
class UserMergeAccounts(PermissionRequiredMixin, FormView):
    """Modal form that merges another user account into the current one."""
    template_name = 'users/merge_accounts_modal.html'
    permission_required = 'osf.view_osfuser'
    object_type = 'osfuser'
    raise_exception = True
    form_class = MergeUserForm

    def get_object(self, queryset=None):
        return OSFUser.load(self.kwargs.get('guid'))

    def get_context_data(self, **kwargs):
        return {'guid': self.get_object()._id}

    def form_valid(self, form):
        absorbing_user = self.get_object()
        merged_guid = form.cleaned_data['user_guid_to_be_merged']
        absorbed_user = OSFUser.objects.get(guids___id=merged_guid, guids___id__isnull=False)
        absorbing_user.merge_user(absorbed_user)
        return redirect(reverse_user(absorbing_user._id))

    def form_invalid(self, form):
        raise Http404(
            '{} not found.'.format(
                form.cleaned_data.get('user_guid_to_be_merged', 'guid')
            ))
class UserSearchList(PermissionRequiredMixin, ListView):
    """Paginated fullname-substring search results over OSF users."""
    template_name = 'users/list.html'
    permission_required = 'osf.view_osfuser'
    raise_exception = True
    form_class = UserSearchForm
    paginate_by = 25

    def get_queryset(self):
        return OSFUser.objects.filter(
            fullname__icontains=self.kwargs['name']
        ).only(
            'guids', 'fullname', 'username', 'date_confirmed', 'date_disabled'
        )

    def get_context_data(self, **kwargs):
        matches = self.get_queryset()
        page_size = self.get_paginate_by(matches)
        paginator, page, page_items, is_paginated = self.paginate_queryset(matches, page_size)
        kwargs['page'] = page
        # Flatten each user on the current page for the template.
        kwargs['users'] = [
            {
                'name': account.fullname,
                'username': account.username,
                'id': account.guids.first()._id,
                'confirmed': account.date_confirmed,
                'disabled': account.date_disabled if account.is_disabled else None,
            }
            for account in page_items
        ]
        return super(UserSearchList, self).get_context_data(**kwargs)
class UserView(PermissionRequiredMixin, GuidView):
    """Admin detail page for a single OSF user."""
    template_name = 'users/user.html'
    context_object_name = 'user'
    permission_required = 'osf.view_osfuser'
    raise_exception = True

    def get_object(self, queryset=None):
        return serialize_user(OSFUser.load(self.kwargs.get('guid')))

    def get_context_data(self, **kwargs):
        context = super(UserView, self).get_context_data(**kwargs)
        # Expose the SpamStatus enum so the template can compare against it.
        context.update({'SPAM_STATUS': SpamStatus})
        return context
class UserWorkshopFormView(PermissionRequiredMixin, FormView):
    """Upload a workshop-attendance CSV and download it augmented with each
    attendee's OSF activity (user id, log count, node count, last log date).

    Column layout assumed by parse() (0-indexed): row[1] workshop date in
    %m/%d/%y, row[4] full name, row[5] email.
    """
    form_class = WorkshopForm
    object_type = 'user'
    template_name = 'users/workshop.html'
    permission_required = 'osf.view_osfuser'
    raise_exception = True

    @staticmethod
    def _results_file_name(upload_name):
        """Derive the download filename from the uploaded filename.

        BUG FIX: the previous ``.strip('.csv')`` stripped any leading or
        trailing '.', 'c', 's' or 'v' characters (e.g. ``stats.csv`` ->
        ``tat``); now only a genuine ``.csv`` suffix is removed.
        """
        base = upload_name[:-4] if upload_name.lower().endswith('.csv') else upload_name
        return '{}_user_stats.csv'.format(base.replace(' ', '_'))

    def form_valid(self, form):
        """Parse the uploaded CSV and stream the augmented CSV back."""
        csv_file = form.cleaned_data['document']
        final = self.parse(csv_file)
        results_file_name = self._results_file_name(csv_file.name)
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(results_file_name)
        writer = csv.writer(response)
        for row in final:
            writer.writerow(row)
        return response

    @staticmethod
    def find_user_by_email(email):
        """Return the user owning *email*, or None."""
        user_list = OSFUser.objects.filter(emails__address=email)
        return user_list[0] if user_list.exists() else None

    @staticmethod
    def find_user_by_full_name(full_name):
        """Return the user with *full_name*, but only if it is unambiguous."""
        user_list = OSFUser.objects.filter(fullname=full_name)
        return user_list[0] if user_list.count() == 1 else None

    @staticmethod
    def find_user_by_family_name(family_name):
        """Return the user with *family_name*, but only if it is unambiguous."""
        user_list = OSFUser.objects.filter(family_name=family_name)
        return user_list[0] if user_list.count() == 1 else None

    @staticmethod
    def get_num_logs_since_workshop(user, workshop_date):
        """Count the user's node logs dated after the day of the workshop."""
        query_date = workshop_date + timedelta(days=1)
        return NodeLog.objects.filter(user=user, date__gt=query_date).count()

    @staticmethod
    def get_num_nodes_since_workshop(user, workshop_date):
        """Count nodes the user created after the day of the workshop."""
        query_date = workshop_date + timedelta(days=1)
        return Node.objects.filter(creator=user, created__gt=query_date).count()

    @staticmethod
    def get_user_latest_log(user, workshop_date):
        """Return the user's most recent node log after the workshop day."""
        query_date = workshop_date + timedelta(days=1)
        return NodeLog.objects.filter(user=user, date__gt=query_date).latest('date')

    def parse(self, csv_file):
        """ Parse and add to csv file.
        :param csv_file: Comma separated
        :return: A list of rows, each extended with OSF activity columns
        """
        result = []
        csv_reader = csv.reader(csv_file)
        for index, row in enumerate(csv_reader):
            if index == 0:
                # Header row: append the titles of the new columns.
                row.extend([
                    'OSF ID', 'Logs Since Workshop', 'Nodes Created Since Workshop', 'Last Log Date'
                ])
                result.append(row)
                continue
            email = row[5]
            user_by_email = self.find_user_by_email(email)
            if not user_by_email:
                # Fall back to matching by full name, then family name.
                full_name = row[4]
                try:
                    family_name = impute_names(full_name)['family']
                except UnicodeDecodeError:
                    row.extend(['Unable to parse name'])
                    result.append(row)
                    continue
                user_by_name = self.find_user_by_full_name(full_name) or self.find_user_by_family_name(family_name)
                if not user_by_name:
                    # No match at all: pad with empty stats.
                    row.extend(['', 0, 0, ''])
                    result.append(row)
                    continue
                else:
                    user = user_by_name
            else:
                user = user_by_email
            workshop_date = pytz.utc.localize(datetime.strptime(row[1], '%m/%d/%y'))
            nodes = self.get_num_nodes_since_workshop(user, workshop_date)
            user_logs = self.get_num_logs_since_workshop(user, workshop_date)
            last_log_date = self.get_user_latest_log(user, workshop_date).date.strftime('%m/%d/%y') if user_logs else ''
            row.extend([
                user._id, user_logs, nodes, last_log_date
            ])
            result.append(row)
        return result

    def form_invalid(self, form):
        # BUG FIX: the parent's response was previously discarded, making
        # this handler return None (an error) on an invalid upload.
        return super(UserWorkshopFormView, self).form_invalid(form)
class GetUserLink(PermissionRequiredMixin, TemplateView):
    """Base modal view that renders a generated link for a user.

    Subclasses implement get_link() and get_link_type(); get_claim_links()
    is optional.
    """
    permission_required = 'osf.change_osfuser'
    template_name = 'users/get_link.html'
    raise_exception = True

    def get_link(self, user):
        # Subclasses must produce the link to display.
        raise NotImplementedError()

    def get_link_type(self):
        # Used in the title of the link modal
        raise NotImplementedError()

    def get_claim_links(self, user):
        # Optional: subclasses may return a list of claim URLs.
        return None

    def get_context_data(self, **kwargs):
        target = OSFUser.load(self.kwargs.get('guid'))
        kwargs.update({
            'user_link': self.get_link(target),
            'username': target.username,
            'title': self.get_link_type(),
            'node_claim_links': self.get_claim_links(target),
        })
        return super(GetUserLink, self).get_context_data(**kwargs)
class GetUserConfirmationLink(GetUserLink):
    """Modal link view that produces an email-confirmation URL."""
    def get_link(self, user):
        # force=True presumably regenerates the confirmation token even if
        # one already exists — confirm against get_confirmation_url().
        return user.get_confirmation_url(user.username, force=True)
    def get_link_type(self):
        return 'User Confirmation'
class GetPasswordResetLink(GetUserLink):
    """Modal link view that mints a 48-hour password-reset URL."""

    def get_link(self, user):
        # Issue a fresh verification key, valid for 48 hours, then build
        # the absolute reset URL from it.
        user.verification_key_v2 = generate_verification_key(verification_type='password')
        expiry = datetime.utcnow().replace(tzinfo=pytz.utc) + timedelta(hours=48)
        user.verification_key_v2['expires'] = expiry
        user.save()
        reset_link = furl(DOMAIN)
        reset_link.path.add(('resetpassword/{}/{}'.format(user._id, user.verification_key_v2['token'])))
        return reset_link

    def get_link_type(self):
        return 'Password Reset'
class GetUserClaimLinks(GetUserLink):
    """Modal listing claim URLs for each of the user's unclaimed records."""

    def get_claim_links(self, user):
        claim_urls = [
            'Claim URL for node {}: {}'.format(
                Node.load(project_guid)._id,
                '{base_url}user/{uid}/{project_id}/claim/?token={token}'.format(
                    base_url=DOMAIN,
                    uid=user._id,
                    project_id=project_guid,
                    token=record['token']
                )
            )
            for project_guid, record in user.unclaimed_records.items()
        ]
        return claim_urls or ['User currently has no active unclaimed records for any nodes.']

    def get_link(self, user):
        # This modal only lists claim links; there is no single user link.
        return None

    def get_link_type(self):
        return 'Claim User'
class ResetPasswordView(PermissionRequiredMixin, FormView):
    """Email a password-reset link to one of the user's registered addresses."""
    form_class = EmailResetForm
    template_name = 'users/reset.html'
    context_object_name = 'user'
    permission_required = 'osf.change_osfuser'
    raise_exception = True
    def dispatch(self, request, *args, **kwargs):
        # Resolve the target user once, before any GET/POST handling.
        self.user = OSFUser.load(self.kwargs.get('guid'))
        if self.user is None:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        return super(ResetPasswordView, self).dispatch(request, *args, **kwargs)
    def get_initial(self):
        # Offer each of the user's email addresses as a form choice.
        self.initial = {
            'guid': self.user._id,
            'emails': [(r, r) for r in self.user.emails.values_list('address', flat=True)],
        }
        return super(ResetPasswordView, self).get_initial()
    def get_context_data(self, **kwargs):
        kwargs.setdefault('guid', self.user._id)
        kwargs.setdefault('emails', self.user.emails)
        return super(ResetPasswordView, self).get_context_data(**kwargs)
    def form_valid(self, form):
        email = form.cleaned_data.get('emails')
        user = get_user(email)
        # 409 when the selected email does not belong to the user in the URL.
        if user is None or user._id != self.kwargs.get('guid'):
            return HttpResponse(
                '{} with id "{}" and email "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid'),
                    email
                ),
                status=409
            )
        reset_abs_url = furl(DOMAIN)
        # Mint a fresh verification token and build the absolute reset URL.
        user.verification_key_v2 = generate_verification_key(verification_type='password')
        user.save()
        reset_abs_url.path.add(('resetpassword/{}/{}'.format(user._id, user.verification_key_v2['token'])))
        send_mail(
            subject='Reset OSF Password',
            message='Follow this link to reset your password: {}'.format(
                reset_abs_url.url
            ),
            from_email=OSF_SUPPORT_EMAIL,
            recipient_list=[email]
        )
        # Audit-log that the reset email was sent.
        update_admin_log(
            user_id=self.request.user.id,
            object_id=user.pk,
            object_repr='User',
            message='Emailed user {} a reset link.'.format(user.pk),
            action_flag=USER_EMAILED
        )
        return super(ResetPasswordView, self).form_valid(form)
    @property
    def success_url(self):
        return reverse_user(self.kwargs.get('guid'))
class UserReindexElastic(UserDeleteView):
    """Trigger an Elasticsearch re-index of the user.

    Reuses UserDeleteView's confirm/delete plumbing only for its URL and
    template handling; nothing is deleted here.
    """
    template_name = 'users/reindex_user_elastic.html'
    def delete(self, request, *args, **kwargs):
        user = self.get_object()
        # NOTE(review): ``async`` became a reserved keyword in Python 3.7, so
        # this keyword argument is a SyntaxError there — the parameter needs
        # renaming upstream before any Python 3.7+ migration.
        search.search.update_user(user, async=False)
        update_admin_log(
            user_id=self.request.user.id,
            object_id=user._id,
            object_repr='User',
            message='User Reindexed (Elastic): {}'.format(user._id),
            action_flag=REINDEX_ELASTIC
        )
        return redirect(reverse_user(self.kwargs.get('guid')))
| {
"content_hash": "ecbf1c0e75b6b7f3e2a40b8b64a8fc74",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 126,
"avg_line_length": 35.61389337641357,
"alnum_prop": 0.5940576094352461,
"repo_name": "caseyrollins/osf.io",
"id": "f44fa7a4c1e9572765a76bfc8e54e99a5c0986e3",
"size": "22045",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "admin/users/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93007"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "296984"
},
{
"name": "JavaScript",
"bytes": "1813961"
},
{
"name": "Mako",
"bytes": "676476"
},
{
"name": "Python",
"bytes": "8712355"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import os
# --- packaging configuration ---------------------------------------------
PACKAGE = "timezonefinder"
VERSION_FILE = "VERSION"
# Conda environments the freshly built package is test-installed into.
VIRT_ENVS = ["APIenv"]
VIRT_ENV_COMMAND = ". ~/miniconda3/etc/profile.d/conda.sh; conda activate {virt_env}; "
PY_VERSION_IDS = [
    "36",
    "37",
    "38",
]  # the supported python versions to create wheels for
# e.g. "py36.py37.py38": the --python-tag handed to bdist_wheel below.
PYTHON_TAG = ".".join([f"py{v}" for v in PY_VERSION_IDS])
if __name__ == "__main__":
    print("building now:")
    # routine("python3 setup.py sdist bdist_wheel upload", 'Uploading the package now.') # deprecated
    # new twine publishing routine:
    # https://packaging.python.org/tutorials/packaging-projects/
    # delete the build folder before to get a fresh build
    # TODO do not remove dist in the future
    os.system("rm -r -f build")
    os.system("rm -r -f dist")
    build_cmd = f"python setup.py sdist bdist_wheel --python-tag {PYTHON_TAG}"
    os.system(build_cmd)
    # in all specified virtual environments
    for virt_env in VIRT_ENVS:
        # Activate the env and install the built package into it.
        virt_env_cmd = VIRT_ENV_COMMAND.format(virt_env=virt_env)
        install_cmd = f"{virt_env_cmd} python setup.py install"
        os.system(install_cmd)
        # routine(build_cmd, 'building the package now.',
        # 'build done. check the included files! installing package in virtual environment next.')
        # routine(install_cmd)
    # Clean up the build directory after installation.
    os.system("rm -r -f build")
| {
"content_hash": "f474353761f4803cce13bd048e257aa8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 102,
"avg_line_length": 35.67567567567568,
"alnum_prop": 0.656060606060606,
"repo_name": "MrMinimal64/timezonefinder",
"id": "2e9f53d0996d52011e7966a02efb3135f0c958bc",
"size": "1320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build_n_install.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135801"
},
{
"name": "Shell",
"bytes": "125"
}
],
"symlink_target": ""
} |
from datetime import timedelta
from sqlalchemy import func
from sqlalchemy.orm import column_property
from sqlalchemy.sql.expression import case
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (
Column, Integer, String, Numeric, DateTime, Date, Interval, Table,
Sequence)
Base = declarative_base()
count = func.count
sum_ = func.sum
distinct = func.distinct
date_part = func.date_part
date_trunc = func.date_trunc
split_part = func.split_part
strpos = func.strpos
substr = func.substr
length = func.length
array_agg = func.array_agg
def string(pkey=False):
    """Return a SQLAlchemy String column (primary key when *pkey* is True)."""
    return Column(String, primary_key=pkey)
def integer(pkey=False):
    """Return a SQLAlchemy Integer column (primary key when *pkey* is True)."""
    return Column(Integer, primary_key=pkey)
def decimal():
    """Return a SQLAlchemy Numeric column."""
    return Column(Numeric)
def datetime():
    """Return a SQLAlchemy DateTime column.

    Note: only ``timedelta`` is imported from the stdlib ``datetime`` module
    in this file, so this name does not shadow anything in use.
    """
    return Column(DateTime)
def date_column(pkey=False):
    """Return a SQLAlchemy Date column (primary key when *pkey* is True)."""
    return Column(Date, primary_key=pkey)
class Visit(Base):
    """This mapped class contains the visits"""
    __tablename__ = 'visit'
    id = integer(pkey=True)
    uuid = string()
    # browser / client details
    browser_name = string()
    hash = string()
    host = string()
    browser_version = string()
    client_tz_offset = integer()
    date = datetime()
    last_visit = datetime()
    ip = string()
    language = string()
    page = string()
    platform = string()
    query = string()
    # referrer information
    referrer = string()
    pretty_referrer = string()
    referrer_domain = string()
    site = string()
    size = string()
    time = Column(Interval)
    # geolocation results
    country = string()
    country_code = string()
    city = string()
    lat = decimal()
    lng = decimal()
    asn = string()
    browser_name_version = string()
    day = date_column()
    hour = integer()
    subdomain = string()
    domain = string()
    # Bucket the raw visit duration into coarse classes: NULL, then 0..10 by
    # increasing duration (<1s, <2s, <5s, ... <600s, else 10).
    # ``time == None`` is intentional here — it is a SQLAlchemy SQL
    # expression (rendered as IS NULL), not a Python identity check.
    spent_time = column_property(
        case([
            (time == None, None),
            (time < timedelta(seconds=1), 0),
            (time < timedelta(seconds=2), 1),
            (time < timedelta(seconds=5), 2),
            (time < timedelta(seconds=10), 3),
            (time < timedelta(seconds=20), 4),
            (time < timedelta(seconds=30), 5),
            (time < timedelta(seconds=60), 6),
            (time < timedelta(seconds=120), 7),
            (time < timedelta(seconds=300), 8),
            (time < timedelta(seconds=600), 9)
        ], else_=10))
class CriterionView(Base):
    """Mapping with the same column layout as Visit (minus spent_time),
    backed by ``criterion_view``.

    NOTE(review): presumably a database view over visits — confirm; the
    duplicated columns could otherwise be shared with Visit via a mixin.
    """
    __tablename__ = 'criterion_view'
    id = integer(pkey=True)
    uuid = string()
    browser_name = string()
    hash = string()
    host = string()
    browser_version = string()
    client_tz_offset = integer()
    date = datetime()
    last_visit = datetime()
    ip = string()
    language = string()
    page = string()
    platform = string()
    query = string()
    referrer = string()
    pretty_referrer = string()
    referrer_domain = string()
    site = string()
    size = string()
    time = Column(Interval)
    country = string()
    country_code = string()
    city = string()
    lat = decimal()
    lng = decimal()
    asn = string()
    browser_name_version = string()
    day = date_column()
    hour = integer()
    subdomain = string()
    domain = string()
# Sequence backing Visit.id.
VisitIdSeq = Sequence('visit_id_seq')
metadata = Base.metadata
# Geoip database
# Table objects for the external GeoIP lookup tables; all live in the
# ``geoip`` schema and are keyed by an IP-range column ``ipr``.
country = Table(
    'country', metadata,
    Column('ipr', String),
    Column('country_code', String),
    Column('country_name', String),
    schema='geoip'
)
city = Table(
    'city', metadata,
    Column('ipr', String),
    Column('country_code', String),
    Column('region', String),
    Column('city', String),
    Column('postal_code', String),
    Column('latitude', Numeric),
    Column('longitude', Numeric),
    Column('metro_code', Integer),
    Column('area_code', Integer),
    schema='geoip'
)
asn = Table(
    'asn', metadata,
    Column('ipr', String),
    Column('asn', String),
    schema='geoip'
)
| {
"content_hash": "08fbfe91ba5afa96ac4eb27aaaf34192",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 70,
"avg_line_length": 23.042168674698797,
"alnum_prop": 0.6083660130718954,
"repo_name": "Kozea/pystil",
"id": "77265233fb1d0585f0f42054af39ddb287ac5f1c",
"size": "3972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pystil/db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18446"
},
{
"name": "CoffeeScript",
"bytes": "9758"
},
{
"name": "HTML",
"bytes": "13534"
},
{
"name": "JavaScript",
"bytes": "244986"
},
{
"name": "Python",
"bytes": "49115"
},
{
"name": "Ruby",
"bytes": "884"
},
{
"name": "Shell",
"bytes": "1916"
}
],
"symlink_target": ""
} |
"""
从stock里获取网络数据,存放在数据库里
"""
import datetime
import socket
import pandas as pd
from stock import (get_stock_basics, get_k_data,
get_report_data, get_profit_data,
get_operation_data, get_growth_data,
get_debtpaying_data, get_cashflow_data,
get_tick_data)
from stock.downloader import load_historys
from navel.celery import app
from .models import *
from .stock import get_stock_basics as get_local_stock_basics
import storage.rcache as rcache
def _update_stock_basics():
    """Fetch the latest stock basics and fully replace the StockBasics table."""
    frame = get_stock_basics()
    # Columns copied verbatim from the remote frame into model fields.
    columns = ('name', 'industry', 'area', 'pe', 'outstanding', 'totals',
               'totalAssets', 'liquidAssets', 'fixedAssets', 'reserved',
               'reservedPerShare', 'eps', 'bvps', 'pb')
    records = []
    for code, row in frame.iterrows():
        kwargs = {col: row[col] for col in columns}
        # timeToMarket arrives as a number; the model stores it as text.
        kwargs['timeToMarket'] = str(row['timeToMarket'])
        records.append(StockBasics(code=code, **kwargs))
    # Replace-all strategy: wipe the table, then bulk-insert the fresh rows.
    StockBasics.objects.all().delete()
    StockBasics.objects.bulk_create(records)
    # Remember when the basic info was last refreshed.
    rcache.set_timestamp(rcache.KEY_TS_BASIC_INFO, str(datetime.date.today()))
@app.task(bind=True, ignore_result=True)
def update_all(self):
    """Refresh basic info first, then history and tick data.

    Retries (up to 3 times, 5 s apart) on network timeouts; any other
    exception is logged and swallowed so the task does not crash the worker.
    """
    try:
        # step 1, update basic info
        _update_stock_basics()
        # step 2, update history and tick after basic info
        update_all_history()
        update_tick()
    except socket.timeout as e:
        # BUG FIX: messages previously said "update_stock_basics",
        # copy-pasted from the sibling task below.
        print("update_all as socket.timeout")
        self.retry(countdown=5, max_retries=3, exc=e)
    except Exception as e:
        print("update_all exception as %(e)s" % locals())
        return
@app.task(bind=True, ignore_result=True)
def update_stock_basics(self):
    """Celery wrapper around _update_stock_basics with timeout retry."""
    try:
        _update_stock_basics()
    except socket.timeout as e:
        # Transient network failure: retry up to 3 times, 5 s apart.
        print("update_stock_basics as socket.timeout")
        self.retry(countdown=5, max_retries=3, exc=e)
    except Exception as e:
        # Any other failure is logged and swallowed (best-effort task).
        print("update_stock_basics exception as %(e)s" % locals())
        return
@app.task(ignore_result=True)
def update_history():
    """Deprecated bulk loader: fetch ~6 months of history for every stock
    in one pass (takes roughly 8 minutes)."""
    start_date = (datetime.date.today()
                  - datetime.timedelta(days=30 * 6)).strftime("%Y-%m-%d")
    # Flatten all per-stock frames into History rows in one comprehension.
    rows = [
        History(
            code=data['code'],
            day=str(day),
            open=data['open'],
            close=data['close'],
            high=data['high'],
            low=data['low'],
            vol=data['volume'],
        )
        for history in load_historys(start_date)
        for day, data in history.iterrows()
    ]
    # Replace-all: clear the table, then bulk-insert.
    History.objects.all().delete()
    History.objects.bulk_create(rows)
@app.task(bind=True, ignore_result=True)
def update_one_history(self, code, start):
    """Fetch one stock's K-line history since `start` and bulk-insert it."""
    try:
        print("Get %(code)s history data" % locals())
        frame = get_k_data(code, start)
        frame.set_index(["date"], inplace=True)
        rows = []
        for day, data in frame.iterrows():
            rows.append(History(
                code=data['code'],
                day=str(day),
                open=data['open'],
                close=data['close'],
                high=data['high'],
                low=data['low'],
                vol=data['volume'],
            ))
        # Append-only here: update_all_history wipes the table before
        # fanning out these per-stock tasks.
        History.objects.bulk_create(rows)
    except socket.timeout as e:
        print("%(code)s as socket.timeout" % locals())
        self.retry(countdown=5, max_retries=3, exc=e)
    except Exception as e:
        print("%(code)s exception as %(e)s" % locals())
        return
@app.task(ignore_result=True)
def update_all_history():
    """Clear History, then fan out a per-stock fetch task (~6 months back)."""
    today = datetime.date.today()
    start_date = (today - datetime.timedelta(days=30 * 6)).strftime("%Y-%m-%d")
    # Wipe first; the sub-tasks only append.
    History.objects.all().delete()
    # One async sub-task per listed stock code.
    for code in get_local_stock_basics().index:
        update_one_history.delay(code, start_date)
    # Remember when the history dataset was last refreshed.
    rcache.set_timestamp(rcache.KEY_TS_HISTORY, str(today))
@app.task(ignore_result=True)
def update_report_data(year, quarter):
    """Fetch quarterly report data and replace the ReportData table."""
    frame = get_report_data(year, quarter)
    # NaN -> None, otherwise the ORM emits bogus column names
    # (django.db.utils.OperationalError: Unknown column 'nane0').
    frame = frame.astype(object).where(pd.notnull(frame), None)
    fields = ('code', 'name', 'eps', 'eps_yoy', 'bvps', 'roe', 'epcf',
              'net_profits', 'profits_yoy', 'distrib', 'report_date')
    records = [ReportData(**{f: row[f] for f in fields})
               for _, row in frame.iterrows()]
    # Replace-all: clear the table, then bulk-insert.
    ReportData.objects.all().delete()
    ReportData.objects.bulk_create(records)
    # Remember when this dataset was last refreshed.
    rcache.set_timestamp(rcache.KEY_TS_REPORT_DATA, str(datetime.date.today()))
@app.task(ignore_result=True)
def update_profit_data(year, quarter):
    """Fetch quarterly profitability data and replace the ProfitData table."""
    frame = get_profit_data(year, quarter)
    # NaN -> None, otherwise the ORM emits bogus column names
    # (django.db.utils.OperationalError: Unknown column 'nane0').
    frame = frame.astype(object).where(pd.notnull(frame), None)
    fields = ('code', 'name', 'roe', 'net_profit_ratio', 'gross_profit_rate',
              'net_profits', 'eps', 'business_income', 'bips')
    records = [ProfitData(**{f: row[f] for f in fields})
               for _, row in frame.iterrows()]
    # Replace-all: clear the table, then bulk-insert.
    ProfitData.objects.all().delete()
    ProfitData.objects.bulk_create(records)
    # Remember when this dataset was last refreshed.
    rcache.set_timestamp(rcache.KEY_TS_PROFIT_DATA, str(datetime.date.today()))
@app.task(ignore_result=True)
def update_operation_data(year, quarter):
    """Fetch quarterly operating-efficiency data into OperationData."""
    frame = get_operation_data(year, quarter)
    # NaN -> None, otherwise the ORM emits bogus column names
    # (django.db.utils.OperationalError: Unknown column 'nane0').
    frame = frame.astype(object).where(pd.notnull(frame), None)
    fields = ('code', 'name', 'arturnover', 'arturndays',
              'inventory_turnover', 'inventory_days',
              'currentasset_turnover', 'currentasset_days')
    records = [OperationData(**{f: row[f] for f in fields})
               for _, row in frame.iterrows()]
    # Replace-all: clear the table, then bulk-insert.
    OperationData.objects.all().delete()
    OperationData.objects.bulk_create(records)
    # Remember when this dataset was last refreshed.
    rcache.set_timestamp(rcache.KEY_TS_OPERATION_DATA, str(datetime.date.today()))
@app.task(ignore_result=True)
def update_growth_data(year, quarter):
    """Fetch quarterly growth data and replace the GrowthData table."""
    frame = get_growth_data(year, quarter)
    # NaN -> None, otherwise the ORM emits bogus column names
    # (django.db.utils.OperationalError: Unknown column 'nane0').
    frame = frame.astype(object).where(pd.notnull(frame), None)
    fields = ('code', 'name', 'mbrg', 'nprg', 'nav', 'targ', 'epsg', 'seg')
    records = [GrowthData(**{f: row[f] for f in fields})
               for _, row in frame.iterrows()]
    # Replace-all: clear the table, then bulk-insert.
    GrowthData.objects.all().delete()
    GrowthData.objects.bulk_create(records)
    # Remember when this dataset was last refreshed.
    rcache.set_timestamp(rcache.KEY_TS_GROWTH_DATA, str(datetime.date.today()))
@app.task(ignore_result=True)
def update_debtpaying_data(year, quarter):
    """Fetch quarterly solvency data and replace the DebtpayingData table."""
    frame = get_debtpaying_data(year, quarter)
    # NaN -> None, otherwise the ORM emits bogus column names
    # (django.db.utils.OperationalError: Unknown column 'nane0').
    frame = frame.astype(object).where(pd.notnull(frame), None)
    fields = ('code', 'name', 'currentratio', 'quickratio', 'cashratio',
              'icratio', 'sheqratio', 'adratio')
    records = [DebtpayingData(**{f: row[f] for f in fields})
               for _, row in frame.iterrows()]
    # Replace-all: clear the table, then bulk-insert.
    DebtpayingData.objects.all().delete()
    DebtpayingData.objects.bulk_create(records)
    # Remember when this dataset was last refreshed.
    rcache.set_timestamp(rcache.KEY_TS_DEBTPAYING_DATA, str(datetime.date.today()))
@app.task(ignore_result=True)
def update_cashflow_data(year, quarter):
    """Fetch quarterly cash-flow data and replace the CashflowData table."""
    frame = get_cashflow_data(year, quarter)
    # NaN -> None, otherwise the ORM emits bogus column names
    # (django.db.utils.OperationalError: Unknown column 'nane0').
    frame = frame.astype(object).where(pd.notnull(frame), None)
    fields = ('code', 'name', 'cf_sales', 'rateofreturn', 'cf_nm',
              'cf_liabilities', 'cashflowratio')
    records = [CashflowData(**{f: row[f] for f in fields})
               for _, row in frame.iterrows()]
    # Replace-all: clear the table, then bulk-insert.
    CashflowData.objects.all().delete()
    CashflowData.objects.bulk_create(records)
    # Remember when this dataset was last refreshed.
    rcache.set_timestamp(rcache.KEY_TS_CASHFLOW_DATA, str(datetime.date.today()))
@app.task(bind=True, ignore_result=True)
def update_one_tick(self, code, day):
    """Fetch one stock's tick data for `day` and append it to Tick."""
    try:
        print("Get %(code)s tick data" % locals())
        frame = get_tick_data(code, day)
        # Map the Chinese column headers onto the model's sector fields.
        sector_fields = (
            ('sec1_buy', '一区买入'), ('sec1_sell', '一区卖出'),
            ('sec2_buy', '二区买入'), ('sec2_sell', '二区卖出'),
            ('sec3_buy', '三区买入'), ('sec3_sell', '三区卖出'),
            ('sec4_buy', '四区买入'), ('sec4_sell', '四区卖出'),
        )
        records = []
        for _, row in frame.iterrows():
            kwargs = {field: row[col] for field, col in sector_fields}
            records.append(Tick(code=code, day=str(day), **kwargs))
        # Append-only: callers clear the table before fanning out.
        Tick.objects.bulk_create(records)
    except socket.timeout as e:
        print("%(code)s as socket.timeout" % locals())
        self.retry(countdown=5, max_retries=3, exc=e)
    except Exception as e:
        print("%(code)s exception as %(e)s" % locals())
        return
@app.task(ignore_result=True)
def update_all_tick():
    """Deprecated: fetch 15 days of tick data for every stock.

    Each stock-day takes ~13 minutes, so the full run takes hours.
    """
    today = datetime.date.today()
    days = [(today - datetime.timedelta(days=offset)).strftime("%Y-%m-%d")
            for offset in range(15)]
    # Wipe first; the per-stock sub-tasks only append.
    Tick.objects.all().delete()
    for code in get_local_stock_basics().index:
        for day in days:
            update_one_tick.delay(code, day)
@app.task(ignore_result=True)
def update_tick():
    """Clear Tick and fan out today's per-stock tick fetch tasks."""
    today = datetime.date.today()
    day = today.strftime("%Y-%m-%d")
    # Wipe first; the per-stock sub-tasks only append.
    Tick.objects.all().delete()
    for code in get_local_stock_basics().index:
        update_one_tick.delay(code, day)
    # Remember when the tick dataset was last refreshed.
    rcache.set_timestamp(rcache.KEY_TS_TICK_DATA, str(today))
| {
"content_hash": "3a3c974d71480686697a97ca14b01170",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 113,
"avg_line_length": 34.24147727272727,
"alnum_prop": 0.5901435327304405,
"repo_name": "flychensc/orange",
"id": "99740f8e9452a6c93c8286c803e355587ae0b0e2",
"size": "12359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storage/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4605"
},
{
"name": "HTML",
"bytes": "95363"
},
{
"name": "JavaScript",
"bytes": "4369"
},
{
"name": "Python",
"bytes": "116185"
}
],
"symlink_target": ""
} |
import subprocess
import traceback
import sys
import os
from SimpleXMLRPCServer import SimpleXMLRPCServer as Server
# Windows path of the PID file. Use a raw string so backslash escapes can
# never bite ('\w' happened to be literal, but e.g. '\t' would not be).
PID_FILE = r'c:\wpf_service.pid'
try:
    # If a previous run recorded a PID, check whether that process is still
    # alive before starting a second server instance. (Python 2 code: uses
    # the `file` builtin and print statements.)
    with file(PID_FILE, 'r') as f:
        pid = f.read()
    # Ask tasklist for an ipy.exe image with exactly that PID.
    proc = subprocess.Popen(['tasklist', '/fi', 'imagename eq ipy.exe', '/fi', 'pid eq ' + str(pid)], stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            break
        if line.find('ipy.exe') != -1:
            # The recorded PID is a live ipy.exe: refuse to start twice.
            print 'server is already running'
            exit(1)
except IOError:
    # No PID file yet: this is the first run.
    pass
print 'pid:', str(os.getpid())
# Record our own PID for the liveness check on the next start.
with file(PID_FILE, 'w') as f:
    f.write(str(os.getpid()))
import actions
def action(action_name, name, *args):
    # RPC entry point: dispatch `action_name` to a function in the `actions`
    # module. Returns a (ok, payload) pair -- payload is the result on
    # success, or the exception text / 'no_info' marker on failure.
    if actions.set_info.no_info:
        return False, 'no_info'
    try:
        # In Python 2 this prints the tuple representation.
        print(action_name, name, args)
        if (action_name == 'exist'):
            # 'exist' is special-cased: called directly, not via action().
            return True, actions.exist(name)
        proc = getattr(actions, action_name)
        return True, actions.action(proc, name, args)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt /
        # SystemExit; consider `except Exception`.
        print str(sys.exc_info()[1])
        return False, str(sys.exc_info()[1])
def alive():
    """Liveness probe exposed over RPC; always reports True."""
    return True
# Expose the RPC endpoints on localhost only (port 1337) and block forever.
server = Server(('127.0.0.1',1337))
server.register_function(action)
server.register_function(actions.set_info)
server.register_function(alive)
server.serve_forever()
| {
"content_hash": "e237f60909efc2d80fbc156f4c4eddbb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 129,
"avg_line_length": 27.645833333333332,
"alnum_prop": 0.6088922381311228,
"repo_name": "renorzr/uimap",
"id": "b47d0207c0ac8959ed0754de0a6293a626490d83",
"size": "1327",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/uimap/drivers/wpf/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22792"
}
],
"symlink_target": ""
} |
"""This code example gets all creative sets.
To create creative sets, run create_creative_sets.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Fetch and print every creative set, paging through the results."""
  # Initialize appropriate service.
  creative_set_service = client.GetService('CreativeSetService',
                                           version='v201502')
  # Create a filter statement (no WHERE clause: matches all creative sets).
  statement = dfp.FilterStatement()
  # Get creative sets by statement, one page per iteration.
  while True:
    response = creative_set_service.getCreativeSetsByStatement(
        statement.ToStatement())
    if 'results' in response:
      # Display results.
      for creative_set in response['results']:
        print ('Creative set with ID \'%s\' and name \'%s\' was found.'
               % (creative_set['id'], creative_set['name']))
      # Advance to the next page.
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break
  # Python 2 print statement; totalResultSetSize comes from the last response.
  print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
  # Initialize client object.
  # LoadFromStorage reads credentials from googleads.yaml in the home dir.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| {
"content_hash": "46682fcb04ba5919ff14985004f3d7e6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 31.093023255813954,
"alnum_prop": 0.6843679880329095,
"repo_name": "wubr2000/googleads-python-lib",
"id": "e6b6e42bb4c0a749c6c7a67c01185b0e5e6b91d6",
"size": "1955",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201502/creative_set_service/get_all_creative_sets.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
} |
import random
import sqlalchemy as sa
from tacker.db.vm import vm_db
from tacker.openstack.common import log as logging
from tacker.plugins.common import constants
LOG = logging.getLogger(__name__)
class ChanceScheduler(object):
    """Select a Device that can serve a service in a random way."""

    def schedule(self, plugin, context,
                 service_type, service_instance_id, name, service_context):
        """Randomly pick an ACTIVE device supporting service_type and bind it.

        Returns (device_dict, service_instance_dict), or None when no
        capable, unused hosting device matches service_context.

        :param service_context: list of DeviceServiceContext
                                without service_instance_id
            [{'network_id': network_id,
              'subnet_id': subnet_id,
              'port_id': port_id,
              'router_id': router_id,
              'role': role,
              'index': index},
             ... ]
            They can be missing or None = don't care
        """
        with context.session.begin(subtransactions=True):
            # Race. prevent from inserting ServiceDeviceBinding
            # select hosting device that is capable of service_type, but
            # not yet used for it.
            # i.e.
            # device.service_type in
            #     [st.service_types for st in
            #      device.template.service_types]
            # and
            # device.service_type not in
            #     [ls.service_type for ls in device.services]
            query = (
                context.session.query(vm_db.Device).
                filter(vm_db.Device.status == constants.ACTIVE).
                filter(
                    sa.exists().
                    where(sa.and_(
                        vm_db.Device.template_id == vm_db.DeviceTemplate.id,
                        vm_db.DeviceTemplate.id ==
                        vm_db.ServiceType.template_id,
                        vm_db.ServiceType.service_type == service_type))).
                filter(
                    ~sa.exists().
                    where(sa.and_(
                        vm_db.Device.id ==
                        vm_db.ServiceDeviceBinding.device_id,
                        vm_db.ServiceDeviceBinding.service_instance_id ==
                        vm_db.ServiceInstance.id,
                        vm_db.ServiceInstance.service_type_id ==
                        vm_db.ServiceType.id,
                        vm_db.ServiceType.service_type == service_type))))
            # Narrow further: the device must have a service-context entry
            # matching every constraint the caller supplied (None = any).
            for sc_entry in service_context:
                network_id = sc_entry.get('network_id')
                subnet_id = sc_entry.get('subnet_id')
                port_id = sc_entry.get('port_id')
                router_id = sc_entry.get('router_id')
                role = sc_entry.get('role')
                index = sc_entry.get('index')
                expr = [
                    vm_db.Device.id == vm_db.DeviceServiceContext.device_id]
                if network_id is not None:
                    expr.append(
                        vm_db.DeviceServiceContext.network_id == network_id)
                if subnet_id is not None:
                    expr.append(
                        vm_db.DeviceServiceContext.subnet_id == subnet_id)
                if port_id is not None:
                    expr.append(vm_db.DeviceServiceContext.port_id == port_id)
                if router_id is not None:
                    expr.append(
                        vm_db.DeviceServiceContext.router_id == router_id)
                if role is not None:
                    expr.append(vm_db.DeviceServiceContext.role == role)
                if index is not None:
                    expr.append(vm_db.DeviceServiceContext.index == index)
                query = query.filter(sa.exists().where(sa.and_(*expr)))
            # Lock candidate rows to serialize concurrent schedulers.
            candidates = query.with_lockmode("update").all()
            if not candidates:
                # BUG FIX: message previously read "supporing".
                LOG.debug(_('no hosting device supporting %s'), service_type)
                return
            device = random.choice(candidates)
            service_type_id = [s.id for s in device.template.service_types
                               if s.service_type == service_type][0]
            service_instance_param = {
                'name': name,
                'service_table_id': service_instance_id,
                'service_type': service_type,
                'service_type_id': service_type_id,
            }
            service_instance_dict = plugin._create_service_instance(
                context, device.id, service_instance_param, False)
            return (plugin._make_device_dict(device), service_instance_dict)
| {
"content_hash": "4f220113cf4e4c610a6a84de99af5836",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 78,
"avg_line_length": 41.407407407407405,
"alnum_prop": 0.5100626118067979,
"repo_name": "SripriyaSeetharam/tacker",
"id": "a14398109bc1c93225619b9f826472a3ff847285",
"size": "5344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tacker/vm/hosting_device_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1204880"
},
{
"name": "Shell",
"bytes": "24370"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class GaugeValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated validator for the compound `indicator.gauge` property.
    def __init__(self, plotly_name="gauge", parent_name="indicator", **kwargs):
        # `data_class_str` names the graph_objects class this validator
        # coerces values into; `data_docs` is runtime help text surfaced in
        # generated docstrings -- do not reflow it.
        super(GaugeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Gauge"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            axis
                :class:`plotly.graph_objects.indicator.gauge.Ax
                is` instance or dict with compatible properties
            bar
                Set the appearance of the gauge's value
            bgcolor
                Sets the gauge background color.
            bordercolor
                Sets the color of the border enclosing the
                gauge.
            borderwidth
                Sets the width (in px) of the border enclosing
                the gauge.
            shape
                Set the shape of the gauge
            steps
                A tuple of :class:`plotly.graph_objects.indicat
                or.gauge.Step` instances or dicts with
                compatible properties
            stepdefaults
                When used in a template (as layout.template.dat
                a.indicator.gauge.stepdefaults), sets the
                default property values to use for elements of
                indicator.gauge.steps
            threshold
                :class:`plotly.graph_objects.indicator.gauge.Th
                reshold` instance or dict with compatible
                properties
""",
            ),
            **kwargs,
        )
| {
"content_hash": "945c66ea05c9a20483ce9ec705dfc379",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 79,
"avg_line_length": 37.95454545454545,
"alnum_prop": 0.5341317365269461,
"repo_name": "plotly/plotly.py",
"id": "e4fa1825566590f46e009385a39dd84b15746ee3",
"size": "1670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/indicator/_gauge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import json
from ..utils.data_utils import get_file
from .. import backend as K
CLASS_INDEX = None
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
def preprocess_input(x, data_format=None):
    """Preprocesses a tensor encoding a batch of images.

    Flips RGB to BGR, then zero-centers each channel with the ImageNet
    mean pixel. For NumPy inputs the subtraction happens in place on a
    view, so the caller's array is modified as well as returned.

    # Arguments
        x: input Numpy tensor, 4D.
        data_format: data format of the image tensor.

    # Returns
        Preprocessed tensor.
    """
    if data_format is None:
        data_format = K.image_data_format()
    assert data_format in {'channels_last', 'channels_first'}
    # ImageNet mean pixel, listed in BGR order to match the flip below.
    mean = (103.939, 116.779, 123.68)
    if data_format == 'channels_first':
        x = x[:, ::-1, :, :]  # 'RGB'->'BGR'
        for channel, value in enumerate(mean):
            x[:, channel, :, :] -= value
    else:
        x = x[:, :, :, ::-1]  # 'RGB'->'BGR'
        for channel, value in enumerate(mean):
            x[:, :, :, channel] -= value
    return x
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    # Arguments
        preds: Numpy tensor encoding a batch of predictions.
        top: integer, how many top-guesses to return.

    # Returns
        A list of lists of top class prediction tuples
        `(class_name, class_description, score)`.
        One list of tuples per sample in batch input.

    # Raises
        ValueError: in case of invalid shape of the `pred` array
            (must be 2D).
    """
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        # Lazily download and cache the class-index mapping.
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        # BUG FIX: previously `json.load(open(fpath))` leaked the file handle.
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []
    for pred in preds:
        # Indices of the `top` largest scores, highest first.
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        result.sort(key=lambda x: x[2], reverse=True)
        results.append(result)
    return results
def _obtain_input_shape(input_shape,
                        default_size,
                        min_size,
                        data_format,
                        include_top):
    """Internal utility to compute/validate an ImageNet model's input shape.

    # Arguments
        input_shape: either None (will return the default network input shape),
            or a user-provided shape to be validated.
        default_size: default input width/height for the model.
        min_size: minimum input width/height accepted by the model.
        data_format: image data format to use.
        include_top: whether the model is expected to
            be linked to a classifier via a Flatten layer.

    # Returns
        An integer shape tuple (may include None entries).

    # Raises
        ValueError: in case of invalid argument values.
    """
    channels_first = data_format == 'channels_first'
    default_shape = ((3, default_size, default_size) if channels_first
                     else (default_size, default_size, 3))
    if include_top:
        # With the classifier attached, the spatial size is fixed.
        if input_shape is not None and input_shape != default_shape:
            raise ValueError('When setting`include_top=True`, '
                             '`input_shape` should be ' +
                             str(default_shape) + '.')
        return default_shape
    if input_shape is None:
        # Fully-convolutional use: spatial dimensions stay unspecified.
        return (3, None, None) if channels_first else (None, None, 3)
    if len(input_shape) != 3:
        raise ValueError('`input_shape` must be a tuple of three integers.')
    # Locate the channel axis and the two spatial axes per data_format.
    channels = input_shape[0] if channels_first else input_shape[-1]
    spatial = input_shape[1:] if channels_first else input_shape[:2]
    if channels != 3:
        raise ValueError('The input must have 3 channels; got '
                         '`input_shape=' + str(input_shape) + '`')
    if any(dim is not None and dim < min_size for dim in spatial):
        raise ValueError('Input size must be at least ' +
                         str(min_size) + 'x' + str(min_size) + ', got '
                         '`input_shape=' + str(input_shape) + '`')
    return input_shape
| {
"content_hash": "ae5b51a7282b02e68eb77562fc073606",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 105,
"avg_line_length": 39.31884057971015,
"alnum_prop": 0.5241430151124217,
"repo_name": "sankit1/cv-tricks.com",
"id": "6ec8c2d89b37985208746baafe435644464144ed",
"size": "5426",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Tensorflow-tutorials/Keras-Tensorflow-tutorial/applications/imagenet_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "318206"
},
{
"name": "Shell",
"bytes": "2014"
}
],
"symlink_target": ""
} |
from enum import Enum
from textwrap import dedent
from typing import Optional, cast
from pants.core.project_info.list_target_types import TargetTypesOptions, list_target_types
from pants.engine.target import BoolField, IntField, RegisteredTargetTypes, StringField, Target
from pants.engine.unions import UnionMembership
from pants.option.global_options import GlobalOptions
from pants.testutil.engine.util import (
MockConsole,
create_goal_subsystem,
create_subsystem,
run_rule,
)
# Note no docstring.
# Intentional fixture: the goal must render this field with no description
# (docstrings here are runtime data inspected by the code under test).
class FortranVersion(StringField):
    alias = "fortran_version"
class GenericTimeout(IntField):
    """The number of seconds to run before timing out."""
    # The docstring above is fixture data: subclasses without their own
    # docstring (FortranTimeout) must inherit it in the goal's output.
    alias = "timeout"
# Note no docstring, but GenericTimeout has it, so we should end up using that.
# (Exercises docstring inheritance in the rendered field details.)
class FortranTimeout(GenericTimeout):
    pass
class FortranLibrary(Target):
    """A library of Fortran code."""
    # The docstring above is fixture data shown in the summary listing.
    alias = "fortran_library"
    core_fields = (FortranVersion,)
# Note multiline docstring.
# Only the first sentence appears in the summary listing; the full text is
# shown in the detailed (--details) view.
class FortranTests(Target):
    """Tests for Fortran code.
    This assumes that you use the FRUIT test framework.
    """
    alias = "fortran_tests"
    core_fields = (FortranVersion, FortranTimeout)
class ArchiveFormat(Enum):
    # Enum backing ArchiveFormatField.valid_choices; the goal renders the
    # values sorted as "'.tar' | '.tgz'".
    TGZ = ".tgz"
    TAR = ".tar"
class ArchiveFormatField(StringField):
    # No docstring on purpose: the detailed view shows only the
    # type/default line for this field.
    alias = "archive_format"
    valid_choices = ArchiveFormat
    default = ArchiveFormat.TGZ.value
class ErrorBehavior(StringField):
    # Required field with tuple-based choices; rendered as
    # "'error' | 'ignore' | 'warn', required" in the detailed view.
    alias = "error_behavior"
    valid_choices = ("ignore", "warn", "error")
    required = True
# Note no docstring.
# Keeps fortran_binary rendering as "<no description>" in test_list_all.
class FortranBinary(Target):
    alias = "fortran_binary"
    core_fields = (FortranVersion, ArchiveFormatField, ErrorBehavior)
def run_goal(
    *, union_membership: Optional[UnionMembership] = None, details_target: Optional[str] = None
) -> str:
    """Run the `target-types` goal rule and capture its stdout."""
    console = MockConsole(use_colors=False)
    registered = RegisteredTargetTypes.create(
        [FortranBinary, FortranLibrary, FortranTests]
    )
    goal_subsystem = create_goal_subsystem(
        TargetTypesOptions, sep="\\n", output_file=None, details=details_target
    )
    run_rule(
        list_target_types,
        rule_args=[
            registered,
            union_membership or UnionMembership({}),
            goal_subsystem,
            create_subsystem(GlobalOptions, v1=False),
            console,
        ],
    )
    return cast(str, console.stdout.getvalue())
def test_list_all() -> None:
    # Summary listing: alphabetical by alias; target types without a
    # docstring render as "<no description>".
    stdout = run_goal()
    assert stdout == dedent(
        """\
        Target types
        ------------
        Use `./pants target-types --details=$target_type` to get detailed information
        for a particular target type.
        fortran_binary <no description>
        fortran_library A library of Fortran code.
        fortran_tests Tests for Fortran code.
        """
    )
def test_list_single() -> None:
    # A plugin-registered field (via UnionMembership) must appear alongside
    # the target type's core fields in the detailed view.
    class CustomField(BoolField):
        """My custom field!
        Use this field to...
        """
        alias = "custom_field"
        required = True
    tests_target_stdout = run_goal(
        union_membership=UnionMembership({FortranTests.PluginField: [CustomField]}),
        details_target=FortranTests.alias,
    )
    assert tests_target_stdout == dedent(
        """\
        fortran_tests
        -------------
        Tests for Fortran code.
        This assumes that you use the FRUIT test framework.
        Valid fields:
        custom_field
        type: bool, required
        My custom field! Use this field to...
        fortran_version
        type: str | None, default: None
        timeout
        type: int | None, default: None
        The number of seconds to run before timing out.
        """
    )
    # Choice fields render their valid values; required fields say "required"
    # instead of a default.
    binary_target_stdout = run_goal(details_target=FortranBinary.alias)
    assert binary_target_stdout == dedent(
        """\
        fortran_binary
        --------------
        Valid fields:
        archive_format
        type: '.tar' | '.tgz' | None, default: '.tgz'
        error_behavior
        type: 'error' | 'ignore' | 'warn', required
        fortran_version
        type: str | None, default: None
        """
    )
| {
"content_hash": "3cf568797bf3712c449dd8fae11d039e",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 95,
"avg_line_length": 24.47953216374269,
"alnum_prop": 0.6144290492116579,
"repo_name": "tdyas/pants",
"id": "8e50fe8ad0dbe5ac4d79701552680314100c4808",
"size": "4318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/core/project_info/list_target_types_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5596"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "518180"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7955590"
},
{
"name": "Rust",
"bytes": "1031208"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "109904"
},
{
"name": "Starlark",
"bytes": "502255"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
    """A poll question and the date it was published."""

    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):  # __unicode__ if you use Python 2.x
        """Show the question text in the admin and the shell."""
        return self.question_text

    def was_published_recently(self):
        """True when pub_date lies within the last day and not in the future."""
        age = timezone.now() - self.pub_date
        return datetime.timedelta(0) <= age <= datetime.timedelta(days=1)

    # Admin list-display metadata for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One answer option for a Question, with its vote tally."""

    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):  # __unicode__ if you use Python 2.x
        """Show the choice text in the admin and the shell."""
        return self.choice_text
| {
"content_hash": "5abe14dd2e279df79dd43079480140c0",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 36.59375,
"alnum_prop": 0.6840307429547395,
"repo_name": "roman-kachanovsky/django-polls-tutorial19",
"id": "d372bab22c3c021664bbd1c35ac5770ebcd8ef54",
"size": "1171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polls/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "149"
},
{
"name": "HTML",
"bytes": "1492"
},
{
"name": "Python",
"bytes": "12296"
}
],
"symlink_target": ""
} |
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
class VPNService(neutron.NeutronResource):
"""
A resource for VPN service in Neutron.
"""
PROPERTIES = (
NAME, DESCRIPTION, ADMIN_STATE_UP,
SUBNET_ID, SUBNET, ROUTER_ID, ROUTER
) = (
'name', 'description', 'admin_state_up',
'subnet_id', 'subnet', 'router_id', 'router'
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, DESCRIPTION_ATTR, NAME_ATTR, ROUTER_ID_ATTR,
STATUS, SUBNET_ID_ATTR, TENANT_ID,
) = (
'admin_state_up', 'description', 'name', 'router_id',
'status', 'subnet_id', 'tenant_id',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the vpn service.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the vpn service.'),
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('Administrative state for the vpn service.'),
default=True,
update_allowed=True
),
SUBNET_ID: properties.Schema(
properties.Schema.STRING,
support_status=support.SupportStatus(
status=support.HIDDEN,
message=_('Use property %s.') % SUBNET,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2'
)
),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
SUBNET: properties.Schema(
properties.Schema.STRING,
_('Subnet in which the vpn service will be created.'),
support_status=support.SupportStatus(version='2014.2'),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
ROUTER_ID: properties.Schema(
properties.Schema.STRING,
_('Unique identifier for the router to which the vpn service '
'will be inserted.'),
support_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s') % ROUTER,
version='2015.1',
previous_status=support.SupportStatus(version='2013.2')),
constraints=[
constraints.CustomConstraint('neutron.router')
]
),
ROUTER: properties.Schema(
properties.Schema.STRING,
_('The router to which the vpn service will be inserted.'),
support_status=support.SupportStatus(version='2015.1'),
constraints=[
constraints.CustomConstraint('neutron.router')
]
)
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of the vpn service.'),
type=attributes.Schema.STRING
),
DESCRIPTION_ATTR: attributes.Schema(
_('The description of the vpn service.'),
type=attributes.Schema.STRING
),
NAME_ATTR: attributes.Schema(
_('The name of the vpn service.'),
type=attributes.Schema.STRING
),
ROUTER_ID_ATTR: attributes.Schema(
_('The unique identifier of the router to which the vpn service '
'was inserted.'),
type=attributes.Schema.STRING
),
STATUS: attributes.Schema(
_('The status of the vpn service.'),
type=attributes.Schema.STRING
),
SUBNET_ID_ATTR: attributes.Schema(
_('The unique identifier of the subnet in which the vpn service '
'was created.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('The unique identifier of the tenant owning the vpn service.'),
type=attributes.Schema.STRING
),
}
def translation_rules(self):
    """Translate the deprecated ``subnet_id`` property into ``subnet``.

    Returns a single REPLACE rule that copies any value supplied for
    the hidden SUBNET_ID property into the SUBNET property.
    """
    rule = properties.TranslationRule(
        self.properties,
        properties.TranslationRule.REPLACE,
        [self.SUBNET],
        value_path=[self.SUBNET_ID])
    return [rule]
def _show_resource(self):
    """Fetch the live vpnservice dict for this resource from Neutron."""
    response = self.neutron().show_vpnservice(self.resource_id)
    return response['vpnservice']
def validate(self):
    """Validate template properties.

    Beyond the base-class checks, require that exactly one of each
    deprecated/replacement property pair (SUBNET_ID/SUBNET and
    ROUTER_ID/ROUTER) is supplied.
    """
    super(VPNService, self).validate()
    self._validate_depr_property_required(
        self.properties, self.SUBNET, self.SUBNET_ID)
    self._validate_depr_property_required(
        self.properties, self.ROUTER, self.ROUTER_ID)
def handle_create(self):
    """Create the vpnservice in Neutron and record its id."""
    props = self.prepare_properties(self.properties,
                                    self.physical_resource_name())
    # Resolve subnet/router name-or-id references into concrete ids
    # under the API field names Neutron expects.
    plugin = self.client_plugin()
    plugin.resolve_subnet(props, self.SUBNET, 'subnet_id')
    plugin.resolve_router(props, self.ROUTER, 'router_id')
    created = self.neutron().create_vpnservice({'vpnservice': props})
    self.resource_id_set(created['vpnservice']['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    """Push changed properties to the existing Neutron vpnservice."""
    if not prop_diff:
        return
    self.neutron().update_vpnservice(
        self.resource_id, {'vpnservice': prop_diff})
def handle_delete(self):
    """Delete the vpnservice.

    Returns True when the delete call succeeded (deletion still in
    progress); returns None when the service was already gone (the
    not-found error is swallowed by ignore_not_found).
    """
    try:
        self.neutron().delete_vpnservice(self.resource_id)
    except Exception as ex:
        # Re-raises anything that is not a "not found" error.
        self.client_plugin().ignore_not_found(ex)
        return None
    return True
class IPsecSiteConnection(neutron.NeutronResource):
    """
    A resource for IPsec site connection in Neutron.

    Wraps the Neutron VPNaaS ipsec_site_connection API: the resource is
    created from the template properties, can have its name, description
    and admin state updated in place, and is deleted on stack removal.
    """

    # Template property keys and the Neutron API field names they map to.
    PROPERTIES = (
        NAME, DESCRIPTION, PEER_ADDRESS, PEER_ID, PEER_CIDRS, MTU,
        DPD, PSK, INITIATOR, ADMIN_STATE_UP, IKEPOLICY_ID,
        IPSECPOLICY_ID, VPNSERVICE_ID,
    ) = (
        'name', 'description', 'peer_address', 'peer_id', 'peer_cidrs', 'mtu',
        'dpd', 'psk', 'initiator', 'admin_state_up', 'ikepolicy_id',
        'ipsecpolicy_id', 'vpnservice_id',
    )

    # Sub-keys of the DPD (dead peer detection) map property.
    _DPD_KEYS = (
        DPD_ACTIONS, DPD_INTERVAL, DPD_TIMEOUT,
    ) = (
        'actions', 'interval', 'timeout',
    )

    # Attribute keys resolvable via get_attr.
    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, AUTH_MODE, DESCRIPTION_ATTR, DPD_ATTR,
        IKEPOLICY_ID_ATTR, INITIATOR_ATTR, IPSECPOLICY_ID_ATTR, MTU_ATTR,
        NAME_ATTR, PEER_ADDRESS_ATTR, PEER_CIDRS_ATTR, PEER_ID_ATTR, PSK_ATTR,
        ROUTE_MODE, STATUS, TENANT_ID, VPNSERVICE_ID_ATTR,
    ) = (
        'admin_state_up', 'auth_mode', 'description', 'dpd',
        'ikepolicy_id', 'initiator', 'ipsecpolicy_id', 'mtu',
        'name', 'peer_address', 'peer_cidrs', 'peer_id', 'psk',
        'route_mode', 'status', 'tenant_id', 'vpnservice_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ipsec site connection.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ipsec site connection.'),
            update_allowed=True
        ),
        PEER_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('Remote branch router public IPv4 address or IPv6 address or '
              'FQDN.'),
            required=True
        ),
        PEER_ID: properties.Schema(
            properties.Schema.STRING,
            _('Remote branch router identity.'),
            required=True
        ),
        PEER_CIDRS: properties.Schema(
            properties.Schema.LIST,
            _('Remote subnet(s) in CIDR format.'),
            required=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                constraints=[
                    constraints.CustomConstraint('net_cidr')
                ]
            )
        ),
        MTU: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum transmission unit size (in bytes) for the ipsec site '
              'connection.'),
            default=1500
        ),
        # Nested map validated against the _DPD_KEYS sub-schema above.
        DPD: properties.Schema(
            properties.Schema.MAP,
            _('Dead Peer Detection protocol configuration for the ipsec site '
              'connection.'),
            schema={
                DPD_ACTIONS: properties.Schema(
                    properties.Schema.STRING,
                    _('Controls DPD protocol mode.'),
                    default='hold',
                    constraints=[
                        constraints.AllowedValues(['clear', 'disabled',
                                                   'hold', 'restart',
                                                   'restart-by-peer']),
                    ]
                ),
                DPD_INTERVAL: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Number of seconds for the DPD delay.'),
                    default=30
                ),
                DPD_TIMEOUT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Number of seconds for the DPD timeout.'),
                    default=120
                ),
            }
        ),
        PSK: properties.Schema(
            properties.Schema.STRING,
            _('Pre-shared key string for the ipsec site connection.'),
            required=True
        ),
        INITIATOR: properties.Schema(
            properties.Schema.STRING,
            _('Initiator state in lowercase for the ipsec site connection.'),
            default='bi-directional',
            constraints=[
                constraints.AllowedValues(['bi-directional', 'response-only']),
            ]
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Administrative state for the ipsec site connection.'),
            default=True,
            update_allowed=True
        ),
        IKEPOLICY_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the ike policy associated with the '
              'ipsec site connection.'),
            required=True
        ),
        IPSECPOLICY_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the ipsec policy associated with the '
              'ipsec site connection.'),
            required=True
        ),
        VPNSERVICE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the vpn service associated with the '
              'ipsec site connection.'),
            required=True
        ),
    }

    # All attribute values are read back from the Neutron API object.
    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of the ipsec site connection.'),
            type=attributes.Schema.STRING
        ),
        AUTH_MODE: attributes.Schema(
            _('The authentication mode of the ipsec site connection.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ipsec site connection.'),
            type=attributes.Schema.STRING
        ),
        DPD_ATTR: attributes.Schema(
            _('The dead peer detection protocol configuration of the ipsec '
              'site connection.'),
            type=attributes.Schema.MAP
        ),
        IKEPOLICY_ID_ATTR: attributes.Schema(
            _('The unique identifier of ike policy associated with the ipsec '
              'site connection.'),
            type=attributes.Schema.STRING
        ),
        INITIATOR_ATTR: attributes.Schema(
            _('The initiator of the ipsec site connection.'),
            type=attributes.Schema.STRING
        ),
        IPSECPOLICY_ID_ATTR: attributes.Schema(
            _('The unique identifier of ipsec policy associated with the '
              'ipsec site connection.'),
            type=attributes.Schema.STRING
        ),
        MTU_ATTR: attributes.Schema(
            _('The maximum transmission unit size (in bytes) of the ipsec '
              'site connection.'),
            type=attributes.Schema.STRING
        ),
        NAME_ATTR: attributes.Schema(
            _('The name of the ipsec site connection.'),
            type=attributes.Schema.STRING
        ),
        PEER_ADDRESS_ATTR: attributes.Schema(
            _('The remote branch router public IPv4 address or IPv6 address '
              'or FQDN.'),
            type=attributes.Schema.STRING
        ),
        PEER_CIDRS_ATTR: attributes.Schema(
            _('The remote subnet(s) in CIDR format of the ipsec site '
              'connection.'),
            type=attributes.Schema.LIST
        ),
        PEER_ID_ATTR: attributes.Schema(
            _('The remote branch router identity of the ipsec site '
              'connection.'),
            type=attributes.Schema.STRING
        ),
        PSK_ATTR: attributes.Schema(
            _('The pre-shared key string of the ipsec site connection.'),
            type=attributes.Schema.STRING
        ),
        ROUTE_MODE: attributes.Schema(
            _('The route mode of the ipsec site connection.'),
            type=attributes.Schema.STRING
        ),
        STATUS: attributes.Schema(
            _('The status of the ipsec site connection.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ipsec site '
              'connection.'),
            type=attributes.Schema.STRING
        ),
        VPNSERVICE_ID_ATTR: attributes.Schema(
            _('The unique identifier of vpn service associated with the ipsec '
              'site connection.'),
            type=attributes.Schema.STRING
        ),
    }

    def _show_resource(self):
        """Fetch the live ipsec_site_connection dict from Neutron."""
        return self.neutron().show_ipsec_site_connection(self.resource_id)[
            'ipsec_site_connection']

    def handle_create(self):
        """Create the ipsec site connection and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        ipsec_site_connection = self.neutron().create_ipsec_site_connection(
            {'ipsec_site_connection': props})['ipsec_site_connection']
        self.resource_id_set(ipsec_site_connection['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed (update_allowed) properties to Neutron."""
        if prop_diff:
            self.neutron().update_ipsec_site_connection(
                self.resource_id, {'ipsec_site_connection': prop_diff})

    def handle_delete(self):
        """Delete the connection; an already-deleted one is not an error."""
        client = self.neutron()
        try:
            client.delete_ipsec_site_connection(self.resource_id)
        except Exception as ex:
            # Re-raises anything that is not a "not found" error.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class IKEPolicy(neutron.NeutronResource):
    """
    A resource for IKE policy in Neutron.

    Wraps the Neutron VPNaaS ikepolicy API: create, update of
    name/description, show and delete.
    """

    # Template property keys and the Neutron API field names they map to.
    PROPERTIES = (
        NAME, DESCRIPTION, AUTH_ALGORITHM, ENCRYPTION_ALGORITHM,
        PHASE1_NEGOTIATION_MODE, LIFETIME, PFS, IKE_VERSION,
    ) = (
        'name', 'description', 'auth_algorithm', 'encryption_algorithm',
        'phase1_negotiation_mode', 'lifetime', 'pfs', 'ike_version',
    )

    # Sub-keys of the LIFETIME map property.
    _LIFETIME_KEYS = (
        LIFETIME_UNITS, LIFETIME_VALUE,
    ) = (
        'units', 'value',
    )

    # Attribute keys resolvable via get_attr.
    ATTRIBUTES = (
        AUTH_ALGORITHM_ATTR, DESCRIPTION_ATTR, ENCRYPTION_ALGORITHM_ATTR,
        IKE_VERSION_ATTR, LIFETIME_ATTR, NAME_ATTR, PFS_ATTR,
        PHASE1_NEGOTIATION_MODE_ATTR, TENANT_ID,
    ) = (
        'auth_algorithm', 'description', 'encryption_algorithm',
        'ike_version', 'lifetime', 'name', 'pfs',
        'phase1_negotiation_mode', 'tenant_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ike policy.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ike policy.'),
            update_allowed=True
        ),
        AUTH_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Authentication hash algorithm for the ike policy.'),
            default='sha1',
            constraints=[
                constraints.AllowedValues(['sha1']),
            ]
        ),
        ENCRYPTION_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Encryption algorithm for the ike policy.'),
            default='aes-128',
            constraints=[
                constraints.AllowedValues(['3des', 'aes-128', 'aes-192',
                                           'aes-256']),
            ]
        ),
        PHASE1_NEGOTIATION_MODE: properties.Schema(
            properties.Schema.STRING,
            _('Negotiation mode for the ike policy.'),
            default='main',
            constraints=[
                constraints.AllowedValues(['main']),
            ]
        ),
        LIFETIME: properties.Schema(
            properties.Schema.MAP,
            _('Safety assessment lifetime configuration for the ike policy.'),
            schema={
                LIFETIME_UNITS: properties.Schema(
                    properties.Schema.STRING,
                    _('Safety assessment lifetime units.'),
                    default='seconds',
                    constraints=[
                        constraints.AllowedValues(['seconds', 'kilobytes']),
                    ]
                ),
                LIFETIME_VALUE: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Safety assessment lifetime value in specified '
                      'units.'),
                    default=3600
                ),
            }
        ),
        PFS: properties.Schema(
            properties.Schema.STRING,
            _('Perfect forward secrecy in lowercase for the ike policy.'),
            default='group5',
            constraints=[
                constraints.AllowedValues(['group2', 'group5', 'group14']),
            ]
        ),
        IKE_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version for the ike policy.'),
            default='v1',
            constraints=[
                constraints.AllowedValues(['v1', 'v2']),
            ]
        ),
    }

    # All attribute values are read back from the Neutron API object.
    attributes_schema = {
        AUTH_ALGORITHM_ATTR: attributes.Schema(
            _('The authentication hash algorithm used by the ike policy.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        ENCRYPTION_ALGORITHM_ATTR: attributes.Schema(
            _('The encryption algorithm used by the ike policy.'),
            type=attributes.Schema.STRING
        ),
        IKE_VERSION_ATTR: attributes.Schema(
            _('The version of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        LIFETIME_ATTR: attributes.Schema(
            _('The safety assessment lifetime configuration for the ike '
              'policy.'),
            type=attributes.Schema.MAP
        ),
        NAME_ATTR: attributes.Schema(
            _('The name of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        PFS_ATTR: attributes.Schema(
            _('The perfect forward secrecy of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        PHASE1_NEGOTIATION_MODE_ATTR: attributes.Schema(
            _('The negotiation mode of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ike policy.'),
            type=attributes.Schema.STRING
        ),
    }

    def _show_resource(self):
        """Fetch the live ikepolicy dict from Neutron."""
        return self.neutron().show_ikepolicy(self.resource_id)['ikepolicy']

    def handle_create(self):
        """Create the ikepolicy and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        ikepolicy = self.neutron().create_ikepolicy({'ikepolicy': props})[
            'ikepolicy']
        self.resource_id_set(ikepolicy['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed (update_allowed) properties to Neutron."""
        if prop_diff:
            self.neutron().update_ikepolicy(self.resource_id,
                                            {'ikepolicy': prop_diff})

    def handle_delete(self):
        """Delete the ikepolicy; an already-deleted one is not an error."""
        client = self.neutron()
        try:
            client.delete_ikepolicy(self.resource_id)
        except Exception as ex:
            # Re-raises anything that is not a "not found" error.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class IPsecPolicy(neutron.NeutronResource):
    """
    A resource for IPsec policy in Neutron.

    Wraps the Neutron VPNaaS ipsecpolicy API: create, update of
    name/description, show and delete.
    """

    # Template property keys and the Neutron API field names they map to.
    PROPERTIES = (
        NAME, DESCRIPTION, TRANSFORM_PROTOCOL, ENCAPSULATION_MODE,
        AUTH_ALGORITHM, ENCRYPTION_ALGORITHM, LIFETIME, PFS,
    ) = (
        'name', 'description', 'transform_protocol', 'encapsulation_mode',
        'auth_algorithm', 'encryption_algorithm', 'lifetime', 'pfs',
    )

    # Sub-keys of the LIFETIME map property.
    _LIFETIME_KEYS = (
        LIFETIME_UNITS, LIFETIME_VALUE,
    ) = (
        'units', 'value',
    )

    # Attribute keys resolvable via get_attr.
    ATTRIBUTES = (
        AUTH_ALGORITHM_ATTR, DESCRIPTION_ATTR, ENCAPSULATION_MODE_ATTR,
        ENCRYPTION_ALGORITHM_ATTR, LIFETIME_ATTR, NAME_ATTR, PFS_ATTR,
        TENANT_ID, TRANSFORM_PROTOCOL_ATTR,
    ) = (
        'auth_algorithm', 'description', 'encapsulation_mode',
        'encryption_algorithm', 'lifetime', 'name', 'pfs',
        'tenant_id', 'transform_protocol',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ipsec policy.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ipsec policy.'),
            update_allowed=True
        ),
        TRANSFORM_PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Transform protocol for the ipsec policy.'),
            default='esp',
            constraints=[
                constraints.AllowedValues(['esp', 'ah', 'ah-esp']),
            ]
        ),
        ENCAPSULATION_MODE: properties.Schema(
            properties.Schema.STRING,
            _('Encapsulation mode for the ipsec policy.'),
            default='tunnel',
            constraints=[
                constraints.AllowedValues(['tunnel', 'transport']),
            ]
        ),
        AUTH_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Authentication hash algorithm for the ipsec policy.'),
            default='sha1',
            constraints=[
                constraints.AllowedValues(['sha1']),
            ]
        ),
        ENCRYPTION_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Encryption algorithm for the ipsec policy.'),
            default='aes-128',
            constraints=[
                constraints.AllowedValues(['3des', 'aes-128', 'aes-192',
                                           'aes-256']),
            ]
        ),
        LIFETIME: properties.Schema(
            properties.Schema.MAP,
            _('Safety assessment lifetime configuration for the ipsec '
              'policy.'),
            schema={
                LIFETIME_UNITS: properties.Schema(
                    properties.Schema.STRING,
                    _('Safety assessment lifetime units.'),
                    default='seconds',
                    constraints=[
                        constraints.AllowedValues(['seconds',
                                                   'kilobytes']),
                    ]
                ),
                LIFETIME_VALUE: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Safety assessment lifetime value in specified '
                      'units.'),
                    default=3600
                ),
            }
        ),
        PFS: properties.Schema(
            properties.Schema.STRING,
            _('Perfect forward secrecy for the ipsec policy.'),
            default='group5',
            constraints=[
                constraints.AllowedValues(['group2', 'group5', 'group14']),
            ]
        ),
    }

    # All attribute values are read back from the Neutron API object.
    attributes_schema = {
        AUTH_ALGORITHM_ATTR: attributes.Schema(
            _('The authentication hash algorithm of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        ENCAPSULATION_MODE_ATTR: attributes.Schema(
            _('The encapsulation mode of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        ENCRYPTION_ALGORITHM_ATTR: attributes.Schema(
            _('The encryption algorithm of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        LIFETIME_ATTR: attributes.Schema(
            _('The safety assessment lifetime configuration of the ipsec '
              'policy.'),
            type=attributes.Schema.MAP
        ),
        NAME_ATTR: attributes.Schema(
            _('The name of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        PFS_ATTR: attributes.Schema(
            _('The perfect forward secrecy of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        TRANSFORM_PROTOCOL_ATTR: attributes.Schema(
            _('The transform protocol of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
    }

    def _show_resource(self):
        """Fetch the live ipsecpolicy dict from Neutron."""
        return self.neutron().show_ipsecpolicy(self.resource_id)['ipsecpolicy']

    def handle_create(self):
        """Create the ipsecpolicy and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        ipsecpolicy = self.neutron().create_ipsecpolicy(
            {'ipsecpolicy': props})['ipsecpolicy']
        self.resource_id_set(ipsecpolicy['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed (update_allowed) properties to Neutron."""
        if prop_diff:
            self.neutron().update_ipsecpolicy(self.resource_id,
                                              {'ipsecpolicy': prop_diff})

    def handle_delete(self):
        """Delete the ipsecpolicy; an already-deleted one is not an error."""
        client = self.neutron()
        try:
            client.delete_ipsecpolicy(self.resource_id)
        except Exception as ex:
            # Re-raises anything that is not a "not found" error.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
def resource_mapping():
    """Map Heat template resource type names to implementing classes."""
    mapping = {
        'OS::Neutron::VPNService': VPNService,
        'OS::Neutron::IPsecSiteConnection': IPsecSiteConnection,
        'OS::Neutron::IKEPolicy': IKEPolicy,
        'OS::Neutron::IPsecPolicy': IPsecPolicy,
    }
    return mapping
| {
"content_hash": "8e3e59acf5817f7ccb9e1779ce43f504",
"timestamp": "",
"source": "github",
"line_count": 762,
"max_line_length": 79,
"avg_line_length": 35.89238845144357,
"alnum_prop": 0.5429616087751371,
"repo_name": "miguelgrinberg/heat",
"id": "b7017026c4e370d9e71d551413af3ed32474455f",
"size": "27925",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/neutron/vpnservice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6541741"
},
{
"name": "Shell",
"bytes": "33395"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``isosurface.colorbar.ticklen`` number attribute."""

    def __init__(
        self, plotly_name="ticklen", parent_name="isosurface.colorbar", **kwargs
    ):
        # Defaults that callers may override through kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        super(TicklenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
| {
"content_hash": "12401a4aa8b77adf1d98094b5420e87c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 80,
"avg_line_length": 32.92857142857143,
"alnum_prop": 0.5943600867678959,
"repo_name": "plotly/plotly.py",
"id": "2184e2a0501e84280b5a57028b59c0307175de1e",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/isosurface/colorbar/_ticklen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""
* Created by Synerty Pty Ltd
*
* This software is open source, the MIT license applies.
*
* Website : http://www.synerty.com
* Support : support@synerty.com
"""
import json
from typing import Any, Dict, Optional
from vortex.Tuple import Tuple, addTupleType, TupleField
@addTupleType
class TupleSelector(Tuple):
    """Identifies a set of tuples by tuple type name plus a selector dict.

    Equality and hashing are based on the canonical (sorted-key) JSON
    representation produced by :meth:`toJsonStr`, so two selectors with
    the same name and selector contents compare equal regardless of
    dict key order.
    """
    __tupleType__ = "vortex.TupleSelector"
    __slots__ = ["name", "selector"]

    # name: the tuple type name this selector is for
    # selector: the key/value criteria used to select tuples

    def __init__(self, name: Optional[str] = None,
                 selector: Optional[Dict] = None) -> None:
        Tuple.__init__(self)
        self.name = name
        self.selector = selector if selector else {}

    def __eq__(self, other):
        # Bug fix: the original signature was __eq__(x, y) and it raised
        # AttributeError when compared against a non-TupleSelector.
        # Returning NotImplemented lets Python fall back to the default
        # comparison for foreign types.
        if not isinstance(other, TupleSelector):
            return NotImplemented
        return self.toJsonStr() == other.toJsonStr()

    def __hash__(self):
        return hash(self.toJsonStr())

    def toJsonStr(self) -> str:
        """ To Json Str

        This method dumps the c{TupleSelector} data to a json string.
        It sorts the dict keys so the result is stable and usable as an
        equality/hash key.
        """
        fieldJsonDict = self.toJsonField(self.selector)
        return json.dumps({'name': self.name,
                           'selector': fieldJsonDict}, sort_keys=True)

    @classmethod
    def fromJsonStr(cls, jsonStr: str) -> "TupleSelector":
        """ From Json Str

        This method creates a new c{TupleSelector} from the ordered json
        string dumped from .toJsonStr
        """
        # Fix: the classmethod's first argument was misleadingly named
        # ``self``; using ``cls`` also makes subclasses round-trip.
        data = json.loads(jsonStr)
        newTs = cls(name=data["name"], selector={})
        newTs.selector = newTs.fromJsonField(data["selector"])
        return newTs

    def __repr__(self):
        return self.toJsonStr()
| {
"content_hash": "3bd8161c7849c775969050588021e8c3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 86,
"avg_line_length": 28.716666666666665,
"alnum_prop": 0.6111433546140452,
"repo_name": "Synerty/vortexpy",
"id": "dd718f4694624323353b07fef609c65017e4ed02",
"size": "1723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vortex/TupleSelector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "220269"
},
{
"name": "Shell",
"bytes": "1986"
}
],
"symlink_target": ""
} |
from pandac.PandaModules import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.fsm import StateData
from toontown.toon import ToonAvatarPanel
from toontown.friends import ToontownFriendSecret
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPGlobals
# Friends-list panel page identifiers, cycled through with the
# left/right arrow buttons.
FLPPets = 1
FLPOnline = 2
FLPAll = 3
FLPOnlinePlayers = 4
FLPPlayers = 5
FLPEnemies = 6
# Singleton FriendsListPanel instance, created lazily by showFriendsList().
globalFriendsList = None
def determineFriendName(friendTuple):
    """Return the display name for a friend tuple, or None if unknown.

    friendTuple is (avId, flags), (avId, flags, playerId) or
    (avId, flags, playerId, showType); showType == 1 means show the
    player (account) name instead of the toon name.
    """
    # Bug fix: the original left avId/playerId/showType unbound (and thus
    # raised NameError below) for tuple lengths other than 2/3/4.
    avId = None
    playerId = None
    showType = 0
    if len(friendTuple) == 2:
        avId, flags = friendTuple
    elif len(friendTuple) == 3:
        avId, flags, playerId = friendTuple
    elif len(friendTuple) == 4:
        avId, flags, playerId, showType = friendTuple
    friendName = None
    if showType == 1 and playerId:
        playerInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
        # Bug fix: guard against the player info not having arrived from
        # the server yet (the original dereferenced None here).
        if playerInfo:
            friendName = playerInfo.playerName
    else:
        hasManager = hasattr(base.cr, 'playerFriendsManager')
        handle = base.cr.identifyFriend(avId)
        if not handle and hasManager:
            handle = base.cr.playerFriendsManager.getAvHandleFromId(avId)
        if handle:
            friendName = handle.getName()
    return friendName
def compareFriends(f1, f2):
    """cmp-style comparator ordering friend tuples by display name."""
    name1 = determineFriendName(f1)
    name2 = determineFriendName(f2)
    # Yields 1 / 0 / -1 for greater / equal / less, matching cmp().
    return (name1 > name2) - (name1 < name2)
def showFriendsList():
    """Create the singleton panel on first use, then open it."""
    global globalFriendsList
    if globalFriendsList is None:
        globalFriendsList = FriendsListPanel()
    globalFriendsList.enter()
def hideFriendsList():
    """Close the panel if it has ever been created."""
    if globalFriendsList is not None:
        globalFriendsList.exit()
def showFriendsListTutorial():
    """Open the panel in tutorial mode.

    Secrets are disabled for unpaid accounts, and the close button is
    disabled so the tutorial script controls when the panel closes.
    """
    global globalFriendsList
    if globalFriendsList is None:
        globalFriendsList = FriendsListPanel()
    panel = globalFriendsList
    panel.enter()
    # Unpaid players may not use true-friend secrets.
    if not base.cr.isPaid():
        panel.secrets['state'] = DGG.DISABLED
    # Stash the close command so hideFriendsListTutorial() can restore it.
    panel.closeCommand = panel.close['command']
    panel.close['command'] = None
def hideFriendsListTutorial():
    """Undo showFriendsListTutorial(): restore close, re-enable secrets."""
    panel = globalFriendsList
    if panel is None:
        return
    if hasattr(panel, 'closeCommand'):
        panel.close['command'] = panel.closeCommand
    if not base.cr.isPaid():
        panel.secrets['state'] = DGG.NORMAL
    panel.exit()
def isFriendsListShown():
    """Return the panel's isEntered flag, or 0 when no panel exists."""
    if globalFriendsList is None:
        return 0
    return globalFriendsList.isEntered
def unloadFriendsList():
    """Destroy the singleton panel and forget the reference."""
    global globalFriendsList
    if globalFriendsList is not None:
        globalFriendsList.unload()
        globalFriendsList = None
class FriendsListPanel(DirectFrame, StateData.StateData):
def __init__(self):
    """Set up panel state; GUI construction is deferred to load()."""
    # Range of pages reachable with the left/right arrow buttons.
    self.leftmostPanel = FLPPets
    self.rightmostPanel = FLPPlayers
    # Some international SKUs do not expose the player-friend pages.
    if base.cr.productName in ['DisneyOnline-UK',
                               'DisneyOnline-AP',
                               'JP',
                               'FR',
                               'BR']:
        self.rightmostPanel = FLPAll
    DirectFrame.__init__(self, relief=None)
    # Remembered scroll position per page, indexed by the FLP* id.
    self.listScrollIndex = [0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0]
    self.initialiseoptions(FriendsListPanel)
    StateData.StateData.__init__(self, 'friends-list-done')
    self.friends = {}
    # Text colors used by the friend-name buttons in the scroll list.
    self.textRolloverColor = Vec4(1, 1, 0, 1)
    self.textDownColor = Vec4(0.5, 0.9, 1, 1)
    self.textDisabledColor = Vec4(0.4, 0.8, 0.4, 1)
    # Page currently displayed.
    self.panelType = FLPOnline
    return
def load(self):
    """Build all GUI elements; no-op if already loaded."""
    if self.isLoaded == 1:
        return None
    self.isLoaded = 1
    gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
    auxGui = loader.loadModel('phase_3.5/models/gui/avatar_panel_gui')
    # Panel title text; filled in later per-page.
    self.title = DirectLabel(parent=self, relief=None, text='', text_scale=TTLocalizer.FLPtitle, text_fg=(0, 0.1, 0.4, 1), pos=(0.007, 0.0, 0.2))
    background_image = gui.find('**/FriendsBox_Open')
    self['image'] = background_image
    self.reparentTo(base.a2dTopRight)
    self.setPos(-0.233, 0, -0.46)
    # Scrolling list of friend-name buttons, 8 visible at a time.
    self.scrollList = DirectScrolledList(parent=self, relief=None, incButton_image=(gui.find('**/FndsLst_ScrollUp'),
     gui.find('**/FndsLst_ScrollDN'),
     gui.find('**/FndsLst_ScrollUp_Rllvr'),
     gui.find('**/FndsLst_ScrollUp')), incButton_relief=None, incButton_pos=(0.0, 0.0, -0.316), incButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), incButton_scale=(1.0, 1.0, -1.0), decButton_image=(gui.find('**/FndsLst_ScrollUp'),
     gui.find('**/FndsLst_ScrollDN'),
     gui.find('**/FndsLst_ScrollUp_Rllvr'),
     gui.find('**/FndsLst_ScrollUp')), decButton_relief=None, decButton_pos=(0.0, 0.0, 0.117), decButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), itemFrame_pos=(-0.17, 0.0, 0.06), itemFrame_relief=None, numItemsVisible=8, items=[])
    # Clip plane keeps long names from overflowing the panel's right edge.
    clipper = PlaneNode('clipper')
    clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.2, 0, 0)))
    clipNP = self.scrollList.attachNewNode(clipper)
    self.scrollList.setClipPlane(clipNP)
    # Close button and left/right page-arrow buttons.
    self.close = DirectButton(parent=self, relief=None, image=(auxGui.find('**/CloseBtn_UP'), auxGui.find('**/CloseBtn_DN'), auxGui.find('**/CloseBtn_Rllvr')), pos=(0.01, 0, -0.38), command=self.__close)
    self.left = DirectButton(parent=self, relief=None, image=(gui.find('**/Horiz_Arrow_UP'),
     gui.find('**/Horiz_Arrow_DN'),
     gui.find('**/Horiz_Arrow_Rllvr'),
     gui.find('**/Horiz_Arrow_UP')), image3_color=Vec4(0.6, 0.6, 0.6, 0.6), pos=(-0.15, 0.0, -0.38), scale=(-1.0, 1.0, 1.0), command=self.__left)
    self.right = DirectButton(parent=self, relief=None, image=(gui.find('**/Horiz_Arrow_UP'),
     gui.find('**/Horiz_Arrow_DN'),
     gui.find('**/Horiz_Arrow_Rllvr'),
     gui.find('**/Horiz_Arrow_UP')), image3_color=Vec4(0.6, 0.6, 0.6, 0.6), pos=(0.17, 0, -0.38), command=self.__right)
    # "New friend" and "secrets" (true friend code) buttons.
    self.newFriend = DirectButton(parent=self, relief=None, pos=(-0.14, 0.0, 0.14), image=(auxGui.find('**/Frnds_Btn_UP'), auxGui.find('**/Frnds_Btn_DN'), auxGui.find('**/Frnds_Btn_RLVR')), text=('', TTLocalizer.FriendsListPanelNewFriend, TTLocalizer.FriendsListPanelNewFriend), text_scale=TTLocalizer.FLPnewFriend, text_fg=(0, 0, 0, 1), text_bg=(1, 1, 1, 1), text_pos=(0.1, -0.085), textMayChange=0, command=self.__newFriend)
    self.secrets = DirectButton(parent=self, relief=None, pos=TTLocalizer.FLPsecretsPos, image=(auxGui.find('**/ChtBx_ChtBtn_UP'), auxGui.find('**/ChtBx_ChtBtn_DN'), auxGui.find('**/ChtBx_ChtBtn_RLVR')), text=('',
     TTLocalizer.FriendsListPanelSecrets,
     TTLocalizer.FriendsListPanelSecrets,
     ''), text_scale=TTLocalizer.FLPsecrets, text_fg=(0, 0, 0, 1), text_bg=(1, 1, 1, 1), text_pos=(-0.04, -0.085), textMayChange=0, command=self.__secrets)
    # The model nodes have been copied into the buttons; free the sources.
    gui.removeNode()
    auxGui.removeNode()
    return
def unload(self):
    """Tear down the GUI; no-op if not loaded."""
    if self.isLoaded == 0:
        return None
    self.isLoaded = 0
    self.exit()
    # NOTE(review): newFriend and secrets buttons are not del'd here;
    # presumably DirectFrame.destroy() cleans them up - confirm.
    del self.title
    del self.scrollList
    del self.close
    del self.left
    del self.right
    del self.friends
    DirectFrame.destroy(self)
    return None
def makeFriendButton(self, friendTuple, colorChoice = None, bold = 0):
    """Build one scroll-list DirectButton for a friend entry.

    friendTuple is (avId, flags[, playerId[, showType]]).  Returns the
    button, or None (implicitly) when the name is not yet known.
    colorChoice overrides the text color; bold adds a darker shadow.
    """
    playerName = None
    toonName = None
    if len(friendTuple) == 2:
        avId, flags = friendTuple
        playerId = None
        showType = 0
    elif len(friendTuple) == 3:
        avId, flags, playerId = friendTuple
        showType = 0
    elif len(friendTuple) == 4:
        avId, flags, playerId, showType = friendTuple
    command = self.__choseFriend
    playerName = None
    # Look up the player (account) name, if any.
    if playerId:
        playerInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId, None)
        if playerInfo:
            playerName = playerInfo.playerName
    # Look up the toon (avatar) name.
    toonName = None
    hasManager = hasattr(base.cr, 'playerFriendsManager')
    handle = base.cr.identifyFriend(avId)
    if not handle and hasManager:
        handle = base.cr.playerFriendsManager.getAvHandleFromId(avId)
    if handle:
        toonName = handle.getName()
    # showType == 1 shows the player name with the toon name on rollover;
    # otherwise the toon name with the player name on rollover.
    if showType == 1 and playerId:
        if not playerName:
            return
        friendName = playerName
        rolloverName = toonName
    else:
        if not toonName:
            # Name not cached yet; request the friends map and bail out.
            base.cr.fillUpFriendsMap()
            return
        friendName = toonName
        if playerName:
            rolloverName = playerName
        else:
            rolloverName = 'Unknown'
    if playerId:
        command = self.__chosePlayerFriend
        thing = playerId
    else:
        thing = avId
    # Text color encodes chat permissions / friend type.
    fg = ToontownGlobals.ColorNoChat
    if flags & ToontownGlobals.FriendChat:
        fg = ToontownGlobals.ColorAvatar
    if playerId:
        fg = ToontownGlobals.ColorPlayer
    if colorChoice:
        fg = colorChoice
    fontChoice = ToontownGlobals.getToonFont()
    fontScale = 0.04
    bg = None
    if colorChoice and bold:
        fontScale = 0.04
        # Shadow is a darkened version of the chosen color.
        colorS = 0.7
        bg = (colorChoice[0] * colorS,
         colorChoice[1] * colorS,
         colorChoice[2] * colorS,
         colorChoice[3])
    db = DirectButton(relief=None, text=friendName, text_scale=fontScale, text_align=TextNode.ALeft, text_fg=fg, text_shadow=bg, text1_bg=self.textDownColor, text2_bg=self.textRolloverColor, text3_fg=self.textDisabledColor, text_font=fontChoice, textMayChange=0, command=command, extraArgs=[thing, showType])
    if playerId:
        # Show the alternate name to the right while the button is hovered.
        accountName = DirectLabel(parent=db, pos=Vec3(-0.02, 0, 0), text=rolloverName, text_fg=(0, 0, 0, 1), text_bg=(1, 1, 1, 1), text_pos=(0, 0), text_scale=0.045, text_align=TextNode.ARight)
        accountName.reparentTo(db.stateNodePath[2])
    return db
def enter(self):
    """Show the panel and start listening for friend-roster events."""
    if self.isEntered == 1:
        return None
    self.isEntered = 1
    if self.isLoaded == 0:
        self.load()
    base.localAvatar.obscureFriendsListButton(1)
    # Dismiss any open avatar panel; the two UIs are mutually exclusive.
    if ToonAvatarPanel.ToonAvatarPanel.currentAvatarPanel:
        ToonAvatarPanel.ToonAvatarPanel.currentAvatarPanel.cleanup()
        ToonAvatarPanel.ToonAvatarPanel.currentAvatarPanel = None
    self.__updateScrollList()
    self.__updateTitle()
    self.__updateArrows()
    self.show()
    # Refresh the list whenever friend state changes while we are open.
    self.accept('friendOnline', self.__friendOnline)
    self.accept('friendPlayers', self.__friendPlayers)
    self.accept('friendOffline', self.__friendOffline)
    self.accept('friendsListChanged', self.__friendsListChanged)
    self.accept('ignoreListChanged', self.__ignoreListChanged)
    self.accept('friendsMapComplete', self.__friendsListChanged)
    self.accept(OTPGlobals.PlayerFriendAddEvent, self.__friendsListChanged)
    self.accept(OTPGlobals.PlayerFriendUpdateEvent, self.__friendsListChanged)
    return
def exit(self):
    """Hide the panel and stop listening for friend events."""
    if self.isEntered == 0:
        return None
    self.isEntered = 0
    # Remember this page's scroll position so enter() can restore it.
    self.listScrollIndex[self.panelType] = self.scrollList.index
    self.hide()
    base.cr.cleanPetsFromFriendsMap()
    self.ignore('friendOnline')
    # Bug fix: 'friendPlayers' is accepted in enter() but was never
    # ignored here, leaking the listener across open/close cycles.
    self.ignore('friendPlayers')
    self.ignore('friendOffline')
    self.ignore('friendsListChanged')
    self.ignore('ignoreListChanged')
    self.ignore('friendsMapComplete')
    self.ignore(OTPGlobals.PlayerFriendAddEvent)
    self.ignore(OTPGlobals.PlayerFriendUpdateEvent)
    base.localAvatar.obscureFriendsListButton(-1)
    messenger.send(self.doneEvent)
    return None
def __close(self):
    """Close-button callback: reset the idle timer and dismiss the panel."""
    messenger.send('wakeup')
    self.exit()
def __left(self):
    """Arrow callback: page one panel type to the left, if possible."""
    messenger.send('wakeup')
    # Save the scroll position of the page we are leaving.
    self.listScrollIndex[self.panelType] = self.scrollList.index
    if self.panelType <= self.leftmostPanel:
        return
    self.panelType -= 1
    self.__updateScrollList()
    self.__updateTitle()
    self.__updateArrows()
def __right(self):
    """Arrow callback: page one panel type to the right, if possible."""
    messenger.send('wakeup')
    # Save the scroll position of the page we are leaving.
    self.listScrollIndex[self.panelType] = self.scrollList.index
    if self.panelType >= self.rightmostPanel:
        return
    self.panelType += 1
    self.__updateScrollList()
    self.__updateTitle()
    self.__updateArrows()
def __secrets(self):
    """Secrets-button callback: open the true-friend-code dialog."""
    messenger.send('wakeup')
    ToontownFriendSecret.showFriendSecret(ToontownFriendSecret.AvatarSecret)
def __newFriend(self):
    """New-friend-button callback: start the make-a-friend flow."""
    messenger.send('wakeup')
    messenger.send('friendAvatar', [None, None, None])
    return
def __choseFriend(self, friendId, showType = 0):
    """Scroll-list callback for a toon friend: open their avatar panel."""
    messenger.send('wakeup')
    handle = base.cr.identifyFriend(friendId)
    if not handle and hasattr(base.cr, 'playerFriendsManager'):
        handle = base.cr.playerFriendsManager.getAvHandleFromId(friendId)
    if handle is not None:
        self.notify.info("Clicked on name in friend's list. doId = %s" % handle.doId)
        messenger.send('clickedNametag', [handle])
def __chosePlayerFriend(self, friendId, showType = 1):
    """Scroll-list callback for a player (account) friend."""
    messenger.send('wakeup')
    playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(friendId)
    # Bug fix: the original dereferenced playerFriendInfo.avatarId before
    # its None check, crashing when the info had not arrived yet.
    if playerFriendInfo is None:
        return
    handle = base.cr.identifyFriend(playerFriendInfo.avatarId)
    if not handle and hasattr(base.cr, 'playerFriendsManager'):
        handle = base.cr.playerFriendsManager.getAvHandleFromId(playerFriendInfo.avatarId)
    self.notify.info("Clicked on name in player friend's list. Id = %s" % friendId)
    messenger.send('clickedNametagPlayer', [handle, friendId, showType])
    return
def __updateScrollList(self):
# Rebuild self.friends and the on-screen scroll list for the current
# self.panelType.  Friend entries are 4-tuples
#   (avatarId, chatFlags, playerId, isPlayerFriend)
# except pet/ignored entries, which are (id, 0) pairs.
# NOTE: indentation was lost in this dump; lines below are kept
# byte-identical and flat — nesting follows the original line order.
newFriends = []
petFriends = []
freeChatOneRef = []
speedChatOneRef = []
freeChatDouble = []
speedChatDouble = []
offlineFriends = []
# --- FLPPlayers: every player (account) friend, online or offline.
if self.panelType == FLPPlayers:
playerFriendList = base.cr.playerFriendsManager.playerFriendsList
for playerFriendId in playerFriendList:
if playerFriendId in base.cr.playerFriendsManager.playerId2Info:
playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(playerFriendId)
if playerFriendInfo.onlineYesNo:
if playerFriendInfo.understandableYesNo:
if playerFriendInfo.avatarId:
freeChatDouble.insert(0, (playerFriendInfo.avatarId,
0,
playerFriendId,
1))
else:
freeChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
elif playerFriendInfo.avatarId:
speedChatDouble.insert(0, (playerFriendInfo.avatarId,
0,
playerFriendId,
1))
else:
speedChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
elif playerFriendInfo.understandableYesNo:
freeChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
else:
speedChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
# --- FLPOnlinePlayers: same sorting, but offline friends are skipped.
if self.panelType == FLPOnlinePlayers:
playerFriendList = base.cr.playerFriendsManager.playerFriendsList
for playerFriendId in playerFriendList:
if playerFriendId in base.cr.playerFriendsManager.playerId2Info:
playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(playerFriendId)
if playerFriendInfo.onlineYesNo:
if playerFriendInfo.understandableYesNo:
if playerFriendInfo.avatarId:
freeChatDouble.insert(0, (playerFriendInfo.avatarId,
0,
playerFriendId,
1))
else:
freeChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
elif playerFriendInfo.avatarId:
speedChatDouble.insert(0, (playerFriendInfo.avatarId,
0,
playerFriendId,
1))
else:
speedChatOneRef.insert(0, (0,
0,
playerFriendId,
1))
# --- FLPAll: every avatar friend, bucketed by chat level / linked player.
if self.panelType == FLPAll:
if base.friendMode == 0:
for friendPair in base.localAvatar.friendsList:
playerId = 0
if hasattr(base.cr, 'playerFriendsManager'):
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(friendPair[0])
if playerId:
if friendPair[1] & ToontownGlobals.FriendChat:
freeChatDouble.insert(0, (friendPair[0],
friendPair[1],
playerId,
0))
else:
speedChatDouble.insert(0, (friendPair[0],
friendPair[1],
playerId,
0))
elif base.cr.isFriendOnline(friendPair[0]):
if friendPair[1] & ToontownGlobals.FriendChat:
freeChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
else:
speedChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
elif friendPair[1] & ToontownGlobals.FriendChat:
freeChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
else:
speedChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
else:
offlineFriends.append((friendPair[0],
friendPair[1],
playerId,
0))
# Also list online player-friend avatars not already known as friends.
if hasattr(base.cr, 'playerFriendsManager'):
for avatarId in base.cr.playerFriendsManager.getAllOnlinePlayerAvatars():
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(avatarId)
playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
if not base.cr.playerFriendsManager.askAvatarKnownElseWhere(avatarId):
if playerFriendInfo.understandableYesNo:
freeChatDouble.insert(0, (avatarId,
0,
playerId,
0))
else:
speedChatDouble.insert(0, (avatarId,
0,
playerId,
0))
elif base.friendMode == 1:
for friendId in base.cr.avatarFriendsManager.avatarFriendsList:
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(friendId)
newFriends.append((friendId,
0,
playerId,
0))
# --- FLPOnline: avatar friends currently online.
if self.panelType == FLPOnline:
if base.friendMode == 0:
for friendPair in base.localAvatar.friendsList:
if hasattr(base.cr, 'playerFriendsManager') and base.cr.isFriendOnline(friendPair[0]):
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(friendPair[0])
if playerId:
if friendPair[1] & ToontownGlobals.FriendChat:
freeChatDouble.insert(0, (friendPair[0],
friendPair[1],
playerId,
0))
else:
speedChatDouble.insert(0, (friendPair[0],
friendPair[1],
playerId,
0))
elif friendPair[1] & ToontownGlobals.FriendChat:
freeChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
else:
speedChatOneRef.insert(0, (friendPair[0],
friendPair[1],
0,
0))
# NOTE(review): this branch adds friends who ARE online to
# offlineFriends (reached when playerFriendsManager is absent).
# Looks suspicious — confirm intent before changing.
elif base.cr.isFriendOnline(friendPair[0]):
offlineFriends.append((friendPair[0],
friendPair[1],
0,
0))
if hasattr(base.cr, 'playerFriendsManager'):
for avatarId in base.cr.playerFriendsManager.getAllOnlinePlayerAvatars():
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(avatarId)
playerFriendInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
if not base.cr.playerFriendsManager.askAvatarKnownElseWhere(avatarId):
if playerFriendInfo.understandableYesNo:
freeChatDouble.insert(0, (avatarId,
0,
playerId,
0))
else:
speedChatDouble.insert(0, (avatarId,
0,
playerId,
0))
elif base.friendMode == 1:
for friendId in base.cr.avatarFriendsManager.avatarFriendsList:
friendInfo = base.cr.avatarFriendsManager.avatarId2Info[friendId]
# NOTE(review): friendPair[0] here leaks from an earlier loop; it
# almost certainly should be friendId (as in the FLPAll branch).
playerId = base.cr.playerFriendsManager.findPlayerIdFromAvId(friendPair[0])
if friendInfo.onlineYesNo:
newFriends.insert(0, (friendId,
0,
playerId,
0))
# --- FLPPets: every DistributedPet currently in the object repository.
if self.panelType == FLPPets:
for objId, obj in base.cr.doId2do.items():
# Imported inside the loop body — presumably to dodge a circular
# import at module load time; confirm before hoisting.
from toontown.pets import DistributedPet
if isinstance(obj, DistributedPet.DistributedPet):
friendPair = (objId, 0)
petFriends.append(friendPair)
# --- FLPEnemies: the local avatar's ignore list.
if self.panelType == FLPEnemies:
for ignored in base.localAvatar.ignoreList:
newFriends.append((ignored, 0))
# The local avatar's own pet leads the pet list on All/Online panels.
if self.panelType == FLPAll or self.panelType == FLPOnline:
if base.wantPets and base.localAvatar.hasPet():
petFriends.insert(0, (base.localAvatar.getPetId(), 0))
# Tear down every existing button (Py2: .keys() returns a list, so
# deleting while iterating is safe here).
for friendPair in self.friends.keys():
friendButton = self.friends[friendPair]
self.scrollList.removeItem(friendButton, refresh=0)
friendButton.destroy()
del self.friends[friendPair]
# Py2-style comparator sorts; compareFriends is a module-level cmp.
newFriends.sort(compareFriends)
petFriends.sort(compareFriends)
freeChatOneRef.sort(compareFriends)
speedChatOneRef.sort(compareFriends)
freeChatDouble.sort(compareFriends)
speedChatDouble.sort(compareFriends)
offlineFriends.sort(compareFriends)
# Rebuild buttons, one category at a time, colored by chat level.
for friendPair in newFriends:
if friendPair not in self.friends:
friendButton = self.makeFriendButton(friendPair)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in petFriends:
if friendPair not in self.friends:
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorNoChat, 0)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in freeChatDouble:
if friendPair not in self.friends:
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorFreeChat, 1)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in freeChatOneRef:
if friendPair not in self.friends:
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorFreeChat, 0)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in speedChatDouble:
if friendPair not in self.friends:
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorSpeedChat, 1)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in speedChatOneRef:
if friendPair not in self.friends:
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorSpeedChat, 0)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
for friendPair in offlineFriends:
if friendPair not in self.friends:
friendButton = self.makeFriendButton(friendPair, ToontownGlobals.ColorNoChat, 0)
if friendButton:
self.scrollList.addItem(friendButton, refresh=0)
self.friends[friendPair] = friendButton
# Restore the scroll position remembered for this panel type.
self.scrollList.index = self.listScrollIndex[self.panelType]
self.scrollList.refresh()
def __updateTitle(self):
    """Show the localized title matching the current panel type."""
    titles = {
        FLPOnline: TTLocalizer.FriendsListPanelOnlineFriends,
        FLPAll: TTLocalizer.FriendsListPanelAllFriends,
        FLPPets: TTLocalizer.FriendsListPanelPets,
        FLPPlayers: TTLocalizer.FriendsListPanelPlayers,
        FLPOnlinePlayers: TTLocalizer.FriendsListPanelOnlinePlayers,
    }
    # Any other panel type (i.e. FLPEnemies) falls back to the ignored list.
    self.title['text'] = titles.get(
        self.panelType, TTLocalizer.FriendsListPanelIgnoredFriends)
    self.title.resetFrameSize()
def __updateArrows(self):
    """Grey out a paging arrow when its end of the panel range is reached."""
    at_left = self.panelType == self.leftmostPanel
    at_right = self.panelType == self.rightmostPanel
    self.left['state'] = 'inactive' if at_left else 'normal'
    self.right['state'] = 'inactive' if at_right else 'normal'
def __friendOnline(self, doId, commonChatFlags, whitelistChatFlags):
    """Event hook: an avatar friend came online; refresh the Online panel."""
    if self.panelType == FLPOnline:
        self.__updateScrollList()
def __friendOffline(self, doId):
    """Event hook: an avatar friend went offline; refresh the Online panel."""
    if self.panelType == FLPOnline:
        self.__updateScrollList()
def __friendPlayers(self, doId):
    """Event hook: player-friend state changed; refresh the Players panel."""
    if self.panelType == FLPPlayers:
        self.__updateScrollList()
def __friendsListChanged(self, arg1 = None, arg2 = None):
    """Event hook: friends list changed; refresh unless showing enemies."""
    if self.panelType != FLPEnemies:
        self.__updateScrollList()
def __ignoreListChanged(self):
    """Event hook: ignore list changed; refresh only the Enemies panel."""
    if self.panelType == FLPEnemies:
        self.__updateScrollList()
| {
"content_hash": "3f7dea7a999bbefe226e8d29b3e5fe49",
"timestamp": "",
"source": "github",
"line_count": 679,
"max_line_length": 430,
"avg_line_length": 43.08100147275405,
"alnum_prop": 0.5490564747709559,
"repo_name": "linktlh/Toontown-journey",
"id": "63919cd5d558fdf3fde5b3ef8e5fe557e19b5327",
"size": "29252",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "toontown/friends/FriendsListPanel.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the 'display_weight' ordering field to the Ad model.

    dependencies = [
        ('bulletin', '0007_add_model_scheduled_post'),
    ]

    operations = [
        migrations.AddField(
            model_name='ad',
            name='display_weight',
            # NOTE(review): b'...' bytes help_text is a Py2-era artifact of
            # `makemigrations` under unicode_literals.  Migrations are frozen
            # history — do not "clean up" the literal.
            field=models.SmallIntegerField(default=1, help_text=b'Ads appear in ascending order of Display Weight'),
        ),
    ]
| {
"content_hash": "c2cf76ede46c13ee970f9b3a0df3983b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 116,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.6269315673289183,
"repo_name": "AASHE/django-bulletin",
"id": "7e98452ec9bfb0bce20754942de3b82801eca5e4",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bulletin/migrations/0008_ad_display_weight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31770"
},
{
"name": "Clojure",
"bytes": "11991"
},
{
"name": "HTML",
"bytes": "123427"
},
{
"name": "JavaScript",
"bytes": "1319347"
},
{
"name": "Procfile",
"bytes": "72"
},
{
"name": "Python",
"bytes": "271365"
}
],
"symlink_target": ""
} |
"""
WSGI config for seattlestats project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "seattlestats.settings")
application = get_wsgi_application()
| {
"content_hash": "26c66b471210c19a2bb0b93e0d33c408",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.0625,
"alnum_prop": 0.7755610972568578,
"repo_name": "lindsayrgwatt/citystats",
"id": "89f03280b3d2fb99d79fff6ca12bbed0b7d72790",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seattlestats/seattlestats/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Nginx",
"bytes": "1509"
},
{
"name": "Python",
"bytes": "14003"
},
{
"name": "Shell",
"bytes": "805"
}
],
"symlink_target": ""
} |
try:
    from subprocess import check_output
except ImportError:
    # Python 2.6 has no subprocess.check_output; supply an equivalent.
    # (https://gist.github.com/edufelipe/1027906)
    from subprocess import Popen, PIPE, CalledProcessError

    def check_output(*popenargs, **kwargs):
        """Run a command and return its stdout.

        Raises CalledProcessError (with ``.output`` attached) when the
        command exits nonzero, mirroring the stdlib behaviour.
        """
        proc = Popen(stdout=PIPE, *popenargs, **kwargs)
        captured, _ = proc.communicate()
        returncode = proc.poll()
        if returncode:
            command = kwargs.get("args")
            if command is None:
                command = popenargs[0]
            failure = CalledProcessError(returncode, command)
            failure.output = captured
            raise failure
        return captured
| {
"content_hash": "e6f6babef08ad3719da522985f928dab",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 35.26315789473684,
"alnum_prop": 0.6044776119402985,
"repo_name": "laserson/eggo",
"id": "1e28e7c4486f2cc8497fa886e0a360651ef59f5e",
"size": "1443",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "eggo/compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "65136"
},
{
"name": "Shell",
"bytes": "3324"
}
],
"symlink_target": ""
} |
"""Line-like geometrical entities.
Contains
========
LinearEntity3D
Line3D
Ray3D
Segment3D
"""
from __future__ import print_function, division
from sympy.core import S, C, Dummy, nan
from sympy.simplify.simplify import simplify
from sympy.solvers import solve
from sympy.geometry.exceptions import GeometryError
from .entity import GeometryEntity
from .point3d import Point3D
from .util import _symbol
from sympy.core.compatibility import is_sequence
class LinearEntity3D(GeometryEntity):
"""An base class for all linear entities (line, ray and segment)
in a 3-dimensional Euclidean space.
Attributes
==========
p1
p2
direction_ratio
direction_cosine
points
Notes
=====
This is a base class and is not meant to be instantiated.
"""
def __new__(cls, p1, p2, **kwargs):
    """Build a linear entity from two distinct Point3D-convertible args."""
    p1, p2 = Point3D(p1), Point3D(p2)
    if p1 == p2:
        # if it makes sense to return a Point, handle in subclass
        raise ValueError(
            "%s.__new__ requires two unique Points." % cls.__name__)
    return GeometryEntity.__new__(cls, p1, p2, **kwargs)
@property
def p1(self):
    """The first of the two Point3D objects defining this entity.

    >>> from sympy import Point3D, Line3D
    >>> Line3D(Point3D(0, 0, 0), Point3D(5, 3, 1)).p1
    Point3D(0, 0, 0)
    """
    return self.args[0]
@property
def p2(self):
    """The second of the two Point3D objects defining this entity.

    >>> from sympy import Point3D, Line3D
    >>> Line3D(Point3D(0, 0, 0), Point3D(5, 3, 1)).p2
    Point3D(5, 3, 1)
    """
    return self.args[1]
@property
def direction_ratio(self):
    """Direction ratio (componentwise difference p2 - p1) as a list.

    >>> from sympy import Point3D, Line3D
    >>> Line3D(Point3D(0, 0, 0), Point3D(5, 3, 1)).direction_ratio
    [5, 3, 1]
    """
    start, end = self.points
    return start.direction_ratio(end)
@property
def direction_cosine(self):
    """Normalized direction ratio; the components of a unit vector
    along the line, so their squares sum to 1.

    >>> from sympy import Point3D, Line3D
    >>> Line3D(Point3D(0, 0, 0), Point3D(5, 3, 1)).direction_cosine
    [sqrt(35)/7, 3*sqrt(35)/35, sqrt(35)/35]
    """
    start, end = self.points
    return start.direction_cosine(end)
@property
def length(self):
    """Length of the entity; infinite for an unbounded line.

    >>> from sympy import Point3D, Line3D
    >>> Line3D(Point3D(0, 0, 0), Point3D(3, 5, 1)).length
    oo
    """
    return S.Infinity
@property
def points(self):
    """The defining points as a ``(p1, p2)`` tuple.

    >>> from sympy import Point3D, Line3D
    >>> Line3D(Point3D(0, 0, 0), Point3D(5, 11, 1)).points
    (Point3D(0, 0, 0), Point3D(5, 11, 1))
    """
    return (self.p1, self.p2)
@staticmethod
def are_concurrent(*lines):
    """Return True when all given linear entities pass through one point.

    Strategy: intersect the first two entities; if they meet in a point,
    every remaining entity must contain that point.  A single entity (or
    none) is never concurrent, and non-linear arguments yield False via
    the AttributeError guard.
    """
    # One entity alone cannot be concurrent.
    if len(lines) <= 1:
        return False
    try:
        common = lines[0].intersection(lines[1])
        if not common:
            # First two are parallel/disjoint, so the whole set fails.
            return False
        meeting_point = common[0]
        return all(meeting_point in entity for entity in lines[2:])
    except AttributeError:
        return False
def is_parallel(l1, l2):
    """Return True if l1 and l2 are parallel (or identical), else False.

    Two entities are parallel when their direction cosines agree up to a
    single common scale factor; a component that vanishes in one but not
    the other rules parallelism out immediately.
    """
    if l1 == l2:
        return True
    ratios = set()
    for u, v in zip(l1.direction_cosine, l2.direction_cosine):
        if u and v:
            ratios.add(u/v)
            if len(ratios) > 1:
                # Two distinct component ratios -> directions differ.
                return False
        elif u or v:
            # Exactly one component is zero -> not parallel.
            return False
    return True
def is_perpendicular(l1, l2):
    """Return True when the direction ratios of l1 and l2 have zero dot
    product, i.e. the entities are perpendicular; otherwise False.
    """
    dot = sum(i*j for i, j in zip(l1.direction_ratio, l2.direction_ratio))
    return dot == 0
def angle_between(l1, l2):
    """Return the angle (in radians) between the two linear entities.

    Uses dot(v1, v2) = |v1|*|v2|*cos(A) on the entities' direction
    vectors (p2 - p1 of each).

    >>> from sympy import Point3D, Line3D
    >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(-1, 2, 0)
    >>> Line3D(p1, p2).angle_between(Line3D(p2, p3))
    acos(-sqrt(2)/3)
    """
    u = l1.p2 - l1.p1
    w = l2.p2 - l2.p1
    return C.acos(u.dot(w)/(abs(u)*abs(w)))
def parallel_line(self, p):
    """Return the Line3D through point `p` parallel to this entity.

    The new line simply reuses this entity's direction ratio.
    """
    return Line3D(p, direction_ratio=self.direction_ratio)
def perpendicular_line(self, p):
    """Return the Line3D through `p` perpendicular to this entity.

    The foot of the perpendicular is found by requiring the vector from
    a parametric point on self to `p` to be orthogonal to self's
    direction ratio.  Raises NotImplementedError when `p` lies on self
    (the perpendicular is then not unique).
    """
    p = Point3D(p)
    if p in self:
        raise NotImplementedError("Given point should not be on the line")
    t = Dummy()
    moving = self.arbitrary_point(t)
    offsets = [u - v for u, v in zip(p.args, moving.args)]
    # Orthogonality condition: offset . direction_ratio == 0, solved for t.
    condition = sum(u*v for u, v in zip(offsets, self.direction_ratio))
    foot = moving.subs(t, solve(condition, t)[0])
    return Line3D(p, foot)
def perpendicular_segment(self, p):
    """Create a perpendicular line segment from `p` to this line.

    The endpoints of the segment are ``p`` and the closest point in
    the line containing self. (If self is not a line, the point might
    not be in self.)

    Returns `p` itself if `p` is on this linear entity.

    >>> from sympy import Point3D, Line3D
    >>> l1 = Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1))
    >>> l1.perpendicular_segment(Point3D(4, 0, 0))
    Segment3D(Point3D(4/3, 4/3, 4/3), Point3D(4, 0, 0))
    """
    p = Point3D(p)
    if p in self:
        # Bug fix: the docstring (and the 2D LinearEntity counterpart)
        # promise `p` itself here; the code previously raised
        # NotImplementedError instead.
        return p
    t = Dummy()
    a = self.arbitrary_point(t)
    # Vector from the parametric point to p ...
    b = [i - j for i, j in zip(p.args, a.args)]
    # ... must be orthogonal to the direction ratio; solve for t.
    c = sum([i*j for i, j in zip(b, self.direction_ratio)])
    d = solve(c, t)
    e = a.subs(t, d[0])
    return Segment3D(p, e)
def projection(self, o):
    """Project a point, line, ray, or segment onto this linear entity.

    A Point3D argument returns the projected Point3D directly; a
    LinearEntity3D argument is projected endpoint-by-endpoint and the
    result is intersected with self, so a list is returned (matching the
    docstring example below).

    Raises
    ======
    GeometryError
        When the argument is neither a Point3D nor a LinearEntity3D.

    Examples
    ========
    >>> from sympy import Point3D, Line3D, Segment3D
    >>> l1 = Line3D(Point3D(0, 0, 1), Point3D(1, 1, 2))
    >>> l1.projection(Point3D(2, 0, 1))
    Point3D(2/3, 2/3, 5/3)
    >>> l1.projection(Segment3D(Point3D(10, 0, 1), Point3D(12, 1, 3)))
    [Segment3D(Point3D(10/3, 10/3, 13/3), Point3D(5, 5, 6))]
    """
    # Work on the full line through self's points even if self is a
    # ray/segment; the final intersection clips back to self.
    tline = Line3D(self.p1, self.p2)

    def _project(p):
        """Project a point onto the line representing self."""
        if p in tline:
            return p
        l1 = tline.perpendicular_line(p)
        return tline.intersection(l1)[0]

    projected = None
    if isinstance(o, Point3D):
        return _project(o)
    elif isinstance(o, LinearEntity3D):
        n_p1 = _project(o.p1)
        n_p2 = _project(o.p2)
        if n_p1 == n_p2:
            # Entity collapses to a single point (perpendicular to self).
            projected = n_p1
        else:
            projected = o.__class__(n_p1, n_p2)
    # Didn't know how to project so raise an error
    if projected is None:
        n1 = self.__class__.__name__
        n2 = o.__class__.__name__
        raise GeometryError(
            "Do not know how to project %s onto %s" % (n2, n1))
    return self.intersection(projected)
def intersection(self, o):
    """The intersection of self with a Point3D or another LinearEntity3D.

    Returns a list: empty when disjoint, a single Point3D for a point
    crossing, or a single entity (line/ray/segment) when the overlap is
    one-dimensional.  Any other argument type is delegated to
    ``o.intersection(self)``.

    Examples
    ========
    >>> from sympy import Point3D, Line3D
    >>> l1 = Line3D(Point3D(4, 19, 12), Point3D(5, 25, 17))
    >>> l2 = Line3D(Point3D(-3, -15, -19), direction_ratio=[2, 8, 8])
    >>> l1.intersection(l2)
    [Point3D(1, 1, -3)]
    """
    if isinstance(o, Point3D):
        if o in self:
            return [o]
        else:
            return []
    elif isinstance(o, LinearEntity3D):
        if self == o:
            return [self]
        elif self.is_parallel(o):
            # Parallel entities overlap iff they lie on the same carrier
            # line; the overlap shape depends on self's type.
            if isinstance(self, Line3D):
                if o.p1 in self:
                    return [o]
                return []
            elif isinstance(self, Ray3D):
                if isinstance(o, Ray3D):
                    # case 1, rays in the same direction
                    if self.xdirection == o.xdirection and \
                            self.ydirection == o.ydirection and \
                            self.zdirection == o.zdirection:
                        return [self] if (self.source in o) else [o]
                    # case 2, rays in the opposite directions
                    else:
                        if o.source in self:
                            if self.source == o.source:
                                return [self.source]
                            return [Segment3D(o.source, self.source)]
                        return []
                elif isinstance(o, Segment3D):
                    if o.p1 in self:
                        if o.p2 in self:
                            return [o]
                        return [Segment3D(o.p1, self.source)]
                    elif o.p2 in self:
                        return [Segment3D(o.p2, self.source)]
                    return []
            elif isinstance(self, Segment3D):
                if isinstance(o, Segment3D):
                    # A reminder that the points of Segments are ordered
                    # in such a way that the following works. See
                    # Segment3D.__new__ for details on the ordering.
                    if self.p1 not in o:
                        if self.p2 not in o:
                            # Neither of the endpoints are in o so either
                            # o is contained in this segment or it isn't
                            if o in self:
                                return [o]
                            return []
                        else:
                            # p1 not in o but p2 is. Either there is a
                            # segment as an intersection, or they only
                            # intersect at an endpoint
                            if self.p2 == o.p1:
                                return [o.p1]
                            return [Segment3D(o.p1, self.p2)]
                    elif self.p2 not in o:
                        # p2 not in o but p1 is. Either there is a
                        # segment as an intersection, or they only
                        # intersect at an endpoint
                        if self.p1 == o.p2:
                            return [o.p2]
                        return [Segment3D(o.p2, self.p1)]
                    # Both points of self in o so the whole segment
                    # is in o
                    return [self]
            else:  # unrecognized LinearEntity
                raise NotImplementedError
        else:
            # If the lines are not parallel then solve their arbitrary points
            # to obtain the point of intersection
            t = t1, t2 = Dummy(), Dummy()
            a = self.arbitrary_point(t1)
            b = o.arbitrary_point(t2)
            dx = a.x - b.x
            c = solve([dx, a.y - b.y], t)
            d = solve([dx, a.z - b.z], t)
            # NOTE(review): presumably len(...) == 1 flags a degenerate/
            # underdetermined solve result (skew lines) -- confirm against
            # the `solve` return conventions for this sympy version.
            if len(c) == 1 and len(d) == 1:
                return []
            e = a.subs(t1, c[t1])
            # Candidate must lie on both entities (clips rays/segments).
            if e in self and e in o:
                return [e]
            else:
                return []
    # Non-point, non-linear argument: let the other entity decide.
    return o.intersection(self)
def arbitrary_point(self, parameter='t'):
    """Return a Point3D parameterizing this line.

    At ``parameter`` 0 the point is p1; at 1 it is p2.  Raises
    ValueError if the chosen symbol already appears in the line's
    definition.

    >>> from sympy import Point3D, Line3D
    >>> Line3D(Point3D(1, 0, 0), Point3D(5, 3, 1)).arbitrary_point()
    Point3D(4*t + 1, 3*t, t)
    """
    t = _symbol(parameter)
    if t.name in (f.name for f in self.free_symbols):
        raise ValueError(
            'Symbol %s already appears in object and cannot be used as a parameter.' % t.name)
    coords = [simplify(a + t*(b - a))
              for a, b in zip(self.p1.args, self.p2.args)]
    return Point3D(*coords)
def is_similar(self, other):
    """Return True when self and other lie on the same carrier line.

    Only Line3D arguments are supported; anything else raises
    NotImplementedError.
    """
    if isinstance(other, Line3D):
        # Same direction and sharing a point => same line.
        return (self.direction_cosine == other.direction_cosine
                and other.p1 in self)
    raise NotImplementedError()
def __contains__(self, other):
    """Return a definitive answer or else raise an error if it cannot
    be determined that other is on the boundaries of self."""
    result = self.contains(other)
    if result is not None:
        return result
    else:
        # Bug fix: the original raised `Undecidable`, a name never
        # imported in this module, so the undecided path crashed with a
        # NameError.  GeometryError is this module's own exception family
        # and is already in scope.
        raise GeometryError(
            "can't decide whether '%s' contains '%s'" % (self, other))
def contains(self, other):
    """Abstract containment test; subclasses must override.

    Contract: return True if *other* is on the boundaries of self,
    False if it is not, and None when no determination can be made.
    """
    raise NotImplementedError()
class Line3D(LinearEntity3D):
"""An infinite 3D line in space.
A line is declared with two distinct points or a point and direction_ratio
as defined using keyword `direction_ratio`.
Parameters
==========
p1 : Point3D
pt : Point3D
direction_ratio : list
See Also
========
sympy.geometry.point3d.Point3D
Examples
========
>>> import sympy
>>> from sympy import Point3D
>>> from sympy.abc import L
>>> from sympy.geometry import Line3D, Segment3D
>>> L = Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
>>> L
Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
>>> L.points
(Point3D(2, 3, 4), Point3D(3, 5, 1))
"""
def __new__(cls, p1, pt=None, direction_ratio=(), **kwargs):
    """Create a Line3D from two points, an existing LinearEntity3D, or a
    point plus a 3-element ``direction_ratio``.

    Raises ValueError when the 2nd argument is not a valid point or when
    neither a 2nd point nor a direction_ratio is supplied.
    """
    # Fix: the default was the mutable `[]`; an immutable empty tuple
    # behaves identically here (only len() and indexing are used) and
    # avoids the shared-mutable-default pitfall.
    if isinstance(p1, LinearEntity3D):
        # Copy-construct from another linear entity's defining points.
        p1, pt = p1.args
    else:
        p1 = Point3D(p1)
    if pt is not None and len(direction_ratio) == 0:
        try:
            pt = Point3D(pt)
        except NotImplementedError:
            raise ValueError('The 2nd argument was not a valid Point. '
            'If it was the direction_ratio of the desired line, enter it '
            'with keyword "direction_ratio".')
    elif len(direction_ratio) == 3 and pt is None:
        # Second point = first point translated by the direction ratio.
        pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
                     p1.z + direction_ratio[2])
    else:
        raise ValueError('A 2nd Point or keyword "direction_ratio" must '
                         'be used.')
    return LinearEntity3D.__new__(cls, p1, pt, **kwargs)
def plot_interval(self, parameter='t'):
    """Return ``[parameter, -5, 5]``, the default plotting range: the
    drawn line extends +/- 5 units, where one unit is the distance
    between the two defining points.

    >>> from sympy import Point3D, Line3D
    >>> Line3D(Point3D(0, 0, 0), Point3D(5, 3, 1)).plot_interval()
    [t, -5, 5]
    """
    return [_symbol(parameter), -5, 5]
def equation(self, x='x', y='y', z='z', k='k'):
    """Return the symmetric-form equation of the line as the tuple
    ``((x - x1)/a, (y - y1)/b, (z - z1)/c, k)`` where (a, b, c) is the
    direction ratio.  A zero direction component yields a zoo/nan term.

    >>> from sympy import Point3D, Line3D
    >>> Line3D(Point3D(1, 0, 0), Point3D(5, 3, 0)).equation()
    (x/4 - 1/4, y/3, zoo*z, k)
    """
    x, y, z, k = _symbol(x), _symbol(y), _symbol(z), _symbol(k)
    a = self.p1.direction_ratio(self.p2)
    return ((x - self.p1.x)/a[0], (y - self.p1.y)/a[1],
            (z - self.p1.z)/a[2], k)
def contains(self, o):
    """Return True if o is on this Line, or False otherwise.

    Accepts a Point3D (or a plain coordinate sequence), a Line3D, or any
    non-linear object (which yields False).

    >>> from sympy import Line3D
    >>> l1 = Line3D((0, 0, 0), (1, 1, 1))
    >>> l2 = Line3D((1, 1, 1), (0, 0, 0))
    >>> l1 == l2
    False
    >>> l1 in l2
    True
    """
    if is_sequence(o):
        o = Point3D(o)
    if isinstance(o, Point3D):
        # Substitute the point into each component of the symmetric-form
        # equation; the line contains it iff all defined components agree.
        sym = list(map(Dummy, 'xyz'))
        eq = self.equation(*sym)
        a = [eq[i].subs(sym[i], o.args[i]) for i in range(3)]
        # nan components arise from zero direction ratios; drop them.
        a = [i for i in a if i != nan]
        if len(a) == 1:
            return True
        first = a.pop(0)
        for i in a:
            # equals() may yield None for undecidable symbolic input;
            # that None is propagated to the caller.
            rv = first.equals(i)
            if not rv:
                return rv
        return True
    elif not isinstance(o, LinearEntity3D):
        return False
    elif isinstance(o, Line3D):
        return all(i in self for i in o.points)
    # NOTE(review): a Ray3D or Segment3D argument falls through and
    # implicitly returns None ("cannot determine") -- confirm intended.
    def distance(self, o):
        """
        Finds the shortest distance between a line and a point.

        Raises
        ======

        NotImplementedError is raised if o is not an instance of Point3D

        Examples
        ========

        >>> from sympy import Point3D, Line3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(1, 1, 1)
        >>> s = Line3D(p1, p2)
        >>> s.distance(Point3D(-1, 1, 1))
        2*sqrt(6)/3
        >>> s.distance((-1, 1, 1))
        2*sqrt(6)/3

        """
        if not isinstance(o, Point3D):
            # coordinate sequences are promoted to Point3D
            if is_sequence(o):
                o = Point3D(o)
        # NOTE(review): despite the docstring, nothing here actually raises
        # NotImplementedError when o is neither a Point3D nor a sequence --
        # confirm intended behavior.
        if o in self:
            return S.Zero
        # the distance is the length of the perpendicular dropped from o
        a = self.perpendicular_segment(o).length
        return a
def equals(self, other):
"""Returns True if self and other are the same mathematical entities"""
if not isinstance(other, Line3D):
return False
return Point3D.are_collinear(self.p1, other.p1, self.p2, other.p2)
class Ray3D(LinearEntity3D):
    """
    A Ray is a semi-line in the space with a source point and a direction.

    Parameters
    ==========

    p1 : Point3D
        The source of the Ray
    p2 : Point or a direction vector
    direction_ratio: Determines the direction in which the Ray propagates.

    Attributes
    ==========

    source
    xdirection
    ydirection
    zdirection

    See Also
    ========

    sympy.geometry.point3d.Point3D, Line3D

    Examples
    ========

    >>> import sympy
    >>> from sympy import Point3D, pi
    >>> from sympy.abc import r
    >>> from sympy.geometry import Ray3D
    >>> r = Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
    >>> r
    Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
    >>> r.points
    (Point3D(2, 3, 4), Point3D(3, 5, 0))
    >>> r.source
    Point3D(2, 3, 4)
    >>> r.xdirection
    oo
    >>> r.ydirection
    oo
    >>> r.direction_ratio
    [1, 2, -4]

    """

    def __new__(cls, p1, pt=None, direction_ratio=None, **kwargs):
        # NOTE: the default used to be a mutable list literal ([]); using
        # None avoids the shared-mutable-default pitfall while keeping the
        # call interface backward-compatible.
        if direction_ratio is None:
            direction_ratio = []
        if isinstance(p1, LinearEntity3D):
            # a linear entity donates both of its defining points
            p1, pt = p1.args
        else:
            p1 = Point3D(p1)
        if pt is not None and len(direction_ratio) == 0:
            try:
                pt = Point3D(pt)
            except NotImplementedError:
                raise ValueError('The 2nd argument was not a valid Point. '
                    'If it was the direction_ratio of the desired line, enter it '
                    'with keyword "direction_ratio".')
        elif len(direction_ratio) == 3 and pt is None:
            # second point = source offset by the given direction ratio
            pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
                         p1.z + direction_ratio[2])
        else:
            # BUG FIX: the two string fragments previously concatenated to
            # "mustbe used." (missing space)
            raise ValueError('A 2nd Point or keyword "direction_ratio" must '
                'be used.')
        return LinearEntity3D.__new__(cls, p1, pt, **kwargs)

    @property
    def source(self):
        """The point from which the ray emanates.

        See Also
        ========

        sympy.geometry.point3d.Point3D

        Examples
        ========

        >>> from sympy import Point3D, Ray3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 1, 5)
        >>> r1 = Ray3D(p1, p2)
        >>> r1.source
        Point3D(0, 0, 0)

        """
        return self.p1

    @property
    def xdirection(self):
        """The x direction of the ray.

        Positive infinity if the ray points in the positive x direction,
        negative infinity if the ray points in the negative x direction,
        or 0 if the ray is vertical.

        See Also
        ========

        ydirection

        Examples
        ========

        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, -1, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.xdirection
        oo
        >>> r2.xdirection
        0

        """
        if self.p1.x < self.p2.x:
            return S.Infinity
        elif self.p1.x == self.p2.x:
            return S.Zero
        else:
            return S.NegativeInfinity

    @property
    def ydirection(self):
        """The y direction of the ray.

        Positive infinity if the ray points in the positive y direction,
        negative infinity if the ray points in the negative y direction,
        or 0 if the ray is horizontal.

        See Also
        ========

        xdirection

        Examples
        ========

        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.ydirection
        -oo
        >>> r2.ydirection
        0

        """
        if self.p1.y < self.p2.y:
            return S.Infinity
        elif self.p1.y == self.p2.y:
            return S.Zero
        else:
            return S.NegativeInfinity

    @property
    def zdirection(self):
        """The z direction of the ray.

        Positive infinity if the ray points in the positive z direction,
        negative infinity if the ray points in the negative z direction,
        or 0 if the ray is horizontal.

        See Also
        ========

        xdirection, ydirection

        Examples
        ========

        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.zdirection
        -oo
        >>> r2.zdirection
        0

        """
        if self.p1.z < self.p2.z:
            return S.Infinity
        elif self.p1.z == self.p2.z:
            return S.Zero
        else:
            return S.NegativeInfinity

    def distance(self, o):
        """
        Finds the shortest distance between the ray and a point.

        Raises
        ======

        NotImplementedError is raised if o is not a Point

        Examples
        ========

        >>> from sympy import Point3D, Ray3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(1, 1, 2)
        >>> s = Ray3D(p1, p2)
        >>> s.distance(Point3D(-1, -1, 2))
        sqrt(6)
        >>> s.distance((-1, -1, 2))
        sqrt(6)

        """
        if not isinstance(o, Point3D):
            # coordinate sequences are promoted to Point3D
            if is_sequence(o):
                o = Point3D(o)
        if o in self:
            return S.Zero
        s = self.perpendicular_segment(o)
        if not isinstance(s, Point3D):
            non_o = s.p1 if s.p1 == o else s.p2
            if self.contains(non_o):
                # the foot of the perpendicular lies on the ray, so the
                # perpendicular distance applies
                return Line3D(self).distance(o)  # = s.length but simpler
        # the following applies when neither of the above apply: the
        # closest point on the ray is then its source
        return self.source.distance(o)

    def plot_interval(self, parameter='t'):
        """The plot interval for the default geometric plot of the Ray. Gives
        values that will produce a ray that is 10 units long (where a unit is
        the distance between the two points that define the ray).

        Parameters
        ==========

        parameter : str, optional
            Default value is 't'.

        Returns
        =======

        plot_interval : list
            [parameter, lower_bound, upper_bound]

        Examples
        ========

        >>> from sympy import Point3D, Ray3D, pi
        >>> r = Ray3D(Point3D(0, 0, 0), Point3D(1, 1, 1))
        >>> r.plot_interval()
        [t, 0, 10]

        """
        t = _symbol(parameter)
        return [t, 0, 10]

    def contains(self, o):
        """Is other GeometryEntity contained in this Ray?"""
        if isinstance(o, Ray3D):
            # a sub-ray must be collinear and point in the same direction
            return (Point3D.are_collinear(self.p1, self.p2, o.p1, o.p2) and
                    self.xdirection == o.xdirection and
                    self.ydirection == o.ydirection and
                    self.zdirection == o.zdirection)
        elif isinstance(o, Segment3D):
            return o.p1 in self and o.p2 in self
        elif is_sequence(o):
            o = Point3D(o)
        if isinstance(o, Point3D):
            if Point3D.are_collinear(self.p1, self.p2, o):
                # apply the half-line test along the first axis on which the
                # ray actually moves
                if self.xdirection is S.Infinity:
                    rv = o.x >= self.source.x
                elif self.xdirection is S.NegativeInfinity:
                    rv = o.x <= self.source.x
                elif self.ydirection is S.Infinity:
                    rv = o.y >= self.source.y
                elif self.ydirection is S.NegativeInfinity:
                    rv = o.y <= self.source.y
                elif self.zdirection is S.Infinity:
                    # BUG FIX: this branch previously tested o.z <= source.z,
                    # wrongly rejecting points ahead of a +z-pointing ray
                    rv = o.z >= self.source.z
                else:
                    # zdirection is NegativeInfinity here (a ray with all
                    # three directions zero would be degenerate)
                    rv = o.z <= self.source.z
                if rv == True or rv == False:
                    return bool(rv)
                raise Undecidable(
                    'Cannot determine if %s is in %s' % (o, self))
            else:
                # Points are not collinear, so the rays are not parallel
                # and hence it is impossible for self to contain o
                return False
        # No other known entity can be contained in a Ray
        return False

    def equals(self, other):
        """Returns True if self and other are the same mathematical entities"""
        if not isinstance(other, Ray3D):
            return False
        return self.source == other.source and other.p2 in self
class Segment3D(LinearEntity3D):
    """A undirected line segment in a 3D space.

    Parameters
    ==========

    p1 : Point3D
    p2 : Point3D

    Attributes
    ==========

    length : number or sympy expression
    midpoint : Point3D

    See Also
    ========

    sympy.geometry.point.Point3D, Line3D

    Examples
    ========

    >>> import sympy
    >>> from sympy import Point3D
    >>> from sympy.abc import s
    >>> from sympy.geometry import Segment3D
    >>> Segment3D((1, 0, 0), (1, 1, 1)) # tuples are interpreted as pts
    Segment3D(Point3D(1, 0, 0), Point3D(1, 1, 1))
    >>> s = Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7))
    >>> s
    Segment3D(Point3D(1, 1, 7), Point3D(4, 3, 9))
    >>> s.points
    (Point3D(1, 1, 7), Point3D(4, 3, 9))
    >>> s.length
    sqrt(17)
    >>> s.midpoint
    Point3D(5/2, 2, 8)

    """

    def __new__(cls, p1, p2, **kwargs):
        # Reorder the two points under the following ordering:
        #   if p1.x != p2.x then p1.x < p2.x
        #   if p1.x == p2.x then p1.y < p2.y
        # The z-coordinate will not come into picture while ordering
        p1 = Point3D(p1)
        p2 = Point3D(p2)
        if p1 == p2:
            # a degenerate segment collapses to its single point
            return Point3D(p1)
        if (p1.x > p2.x) == True:
            p1, p2 = p2, p1
        elif (p1.x == p2.x) == True and (p1.y > p2.y) == True:
            p1, p2 = p2, p1
        return LinearEntity3D.__new__(cls, p1, p2, **kwargs)

    def plot_interval(self, parameter='t'):
        """The plot interval for the default geometric plot of the Segment gives
        values that will produce the full segment in a plot.

        Parameters
        ==========

        parameter : str, optional
            Default value is 't'.

        Returns
        =======

        plot_interval : list
            [parameter, lower_bound, upper_bound]

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 0)
        >>> s1 = Segment3D(p1, p2)
        >>> s1.plot_interval()
        [t, 0, 1]

        """
        t = _symbol(parameter)
        return [t, 0, 1]

    @property
    def length(self):
        """The length of the line segment.

        See Also
        ========

        sympy.geometry.point3d.Point3D.distance

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 3, 3)
        >>> s1 = Segment3D(p1, p2)
        >>> s1.length
        sqrt(34)

        """
        return Point3D.distance(self.p1, self.p2)

    @property
    def midpoint(self):
        """The midpoint of the line segment.

        See Also
        ========

        sympy.geometry.point3d.Point3D.midpoint

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 3, 3)
        >>> s1 = Segment3D(p1, p2)
        >>> s1.midpoint
        Point3D(2, 3/2, 3/2)

        """
        return Point3D.midpoint(self.p1, self.p2)

    def distance(self, o):
        """
        Finds the shortest distance between a line segment and a point.

        Raises
        ======

        NotImplementedError is raised if o is not a Point3D

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 0, 3), Point3D(1, 1, 4)
        >>> s = Segment3D(p1, p2)
        >>> s.distance(Point3D(10, 15, 12))
        sqrt(341)
        >>> s.distance((10, 15, 12))
        sqrt(341)

        """
        if is_sequence(o):
            o = Point3D(o)
        if isinstance(o, Point3D):
            seg_vector = self.p2 - self.p1
            pt_vector = o - self.p1
            # t is the normalized projection of o onto the segment's line:
            # t <= 0 means the closest point is p1, t >= 1 means p2,
            # otherwise it is an interior point of the segment
            t = seg_vector.dot(pt_vector)/self.length**2
            if t >= 1:
                distance = Point3D.distance(self.p2, o)
            elif t <= 0:
                distance = Point3D.distance(self.p1, o)
            else:
                # BUG FIX: the z component previously reused seg_vector.y,
                # giving wrong distances for interior projections
                distance = Point3D.distance(
                    self.p1 + Point3D(t*seg_vector.x, t*seg_vector.y,
                                      t*seg_vector.z), o)
            return distance
        raise NotImplementedError()

    def contains(self, other):
        """
        Is the other GeometryEntity contained within this Segment?

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 1, 1), Point3D(3, 4, 5)
        >>> s = Segment3D(p1, p2)
        >>> s2 = Segment3D(p2, p1)
        >>> s.contains(s2)
        True

        """
        if is_sequence(other):
            other = Point3D(other)
        if isinstance(other, Segment3D):
            return other.p1 in self and other.p2 in self
        elif isinstance(other, Point3D):
            if Point3D.are_collinear(self.p1, self.p2, other):
                # a collinear point lies on the segment exactly when its
                # distances to the two endpoints sum to the segment length
                if other.distance(self.p1) + other.distance(self.p2) == self.length:
                    return True
                else:
                    return False
        return False
| {
"content_hash": "d2b8cbb76055f2a1554b837a91633c8e",
"timestamp": "",
"source": "github",
"line_count": 1474,
"max_line_length": 84,
"avg_line_length": 27.6763907734057,
"alnum_prop": 0.4894472361809045,
"repo_name": "AunShiLord/sympy",
"id": "fad862cde3195771bdb004afc862a01e2e10391d",
"size": "40795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/geometry/line3d.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13716936"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
# NOTE(review): hardcoded account credentials checked into source control --
# load these from environment variables or a secrets store instead.
# 'email' is the sending account, 'recipient' the destination address;
# 'password' is left blank here.
details = {'email': 'jiding7@gmail.com', 'password': '', 'recipient': 'dingji97@outlook.com'}
| {
"content_hash": "f95a7e06bda8d5298dd9d318abf48815",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 93,
"avg_line_length": 94,
"alnum_prop": 0.6595744680851063,
"repo_name": "fomhko/courseHunter",
"id": "a9318930de56884c77a230eb60e9539d0ce73ba3",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "passwords.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "38"
},
{
"name": "Python",
"bytes": "2549"
}
],
"symlink_target": ""
} |
import os
import sys
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Dict, List, Optional
from absl import logging
from blessings import Terminal
from googleapiclient import discovery
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import Session, sessionmaker
import caliban.config.experiment as ce
import caliban.util.auth as ua
from caliban.history.types import (ContainerSpec, Experiment, ExperimentGroup,
Job, JobSpec, JobStatus, Platform, init_db)
from caliban.platform.cloud.types import JobStatus as CloudStatus
from caliban.platform.gke.cluster import Cluster
from caliban.platform.gke.types import JobStatus as GkeStatus
from caliban.platform.gke.util import default_credentials
# Environment variable consulted for a user-supplied database URL.
DB_URL_ENV = 'CALIBAN_DB_URL'
# In-memory sqlite fallback; history is lost when the process exits.
MEMORY_DB_URL = 'sqlite:///:memory:'
# Default on-disk sqlite location for experiment history.
SQLITE_FILE_DB_URL = 'sqlite:///~/.caliban/caliban.db'
# Terminal handle used for colorized log output throughout this module.
t = Terminal()
# ----------------------------------------------------------------------------
def _create_sqa_engine(
    url: str = SQLITE_FILE_DB_URL,
    echo: bool = False,
) -> Engine:
  '''creates a sqlalchemy Engine instance and initializes its schema

  Args:
    url: url of database
    echo: if True, will echo all SQL commands to terminal

  Returns:
    sqlalchemy Engine instance
  '''
  # A file-backed sqlite url needs its parent directory to exist; we also
  # expand ~ so the default under the user's home directory works.
  is_local_sqlite = (url.startswith('sqlite:///') and
                     url != 'sqlite:///:memory:')
  if is_local_sqlite:
    dirname, filename = os.path.split(url.replace('sqlite:///', ''))
    dirname = os.path.expanduser(dirname)
    os.makedirs(dirname, exist_ok=True)
    url = f'sqlite:///{os.path.join(dirname, filename)}'
  engine = create_engine(url, echo=echo)
  init_db(engine)
  return engine
# ----------------------------------------------------------------------------
def get_mem_engine(echo: bool = False) -> Engine:
  '''gets a sqlalchemy engine connection to an in-memory sqlite instance

  Args:
    echo: if True, will echo all SQL commands to terminal

  Returns:
    sqlalchemy Engine instance
  '''
  # thin wrapper over _create_sqa_engine pinned to the in-memory url
  return _create_sqa_engine(url=MEMORY_DB_URL, echo=echo)
# ----------------------------------------------------------------------------
def get_sql_engine(
    url: Optional[str] = None,
    strict=False,
    echo: bool = False,
) -> Engine:
  '''gets a sqlalchemy Engine instance

  Args:
    url: url of database, if None, uses DB_URL_ENV environment variable or
      SQLITE_FILE_DB_URL as fallbacks, in that order
    strict: if True, won't attempt to fall back to local or memory engines.
    echo: if True, will echo all SQL commands to terminal

  Returns:
    sqlalchemy Engine instance
  '''
  if url is None:
    url = os.environ.get(DB_URL_ENV) or SQLITE_FILE_DB_URL
  try:
    return _create_sqa_engine(url=url, echo=echo)
  except (OperationalError, OSError) as e:
    logging.error("")
    logging.error(
        t.red(
            f"Caliban failed to connect to its experiment tracking database! Details:"
        ))
    logging.error("")
    logging.error(t.red(str(e)))
    logging.error(t.red(f"Caliban attempted to connect to '{url}'."))
    logging.error(t.red(f"Try setting a different URL using ${DB_URL_ENV}."))
    logging.error("")
    if strict:
      sys.exit(1)
    else:
      # For now, we allow two levels of fallback. The goal is to make sure that
      # the job can proceed, no matter what.
      #
      # If you specify a custom URL, Caliban will fall back to the local
      # default database location. If that fails, Caliban will attempt once
      # more using an in-memory instance of SQLite. The only reason that should
      # fail is if your system doesn't support SQLite at all.
      if url == SQLITE_FILE_DB_URL:
        # local file already failed: last resort is the in-memory engine,
        # retried with strict=True so a second failure exits
        logging.warning(
            t.yellow(f"Attempting to proceed with in-memory database."))
        # TODO when we add our strict flag, bail here and don't even allow
        # in-memory.
        logging.warning(
            t.yellow(
                f"WARNING! This means that your job's history won't be accessible "
                f"via any of the `caliban history` commands. Proceed at your future self's peril."
            ))
        return get_sql_engine(url=MEMORY_DB_URL, strict=True, echo=echo)
      # a custom url failed: retry recursively with the local default
      logging.info(
          t.yellow(f"Falling back to local sqlite db: {SQLITE_FILE_DB_URL}"))
      return get_sql_engine(url=SQLITE_FILE_DB_URL, strict=False, echo=echo)
# ----------------------------------------------------------------------------
@contextmanager
def session_scope(engine: Engine) -> Session:
  '''returns a sqlalchemy session using the provided engine

  This contextmanager commits all pending session changes on scope exit,
  and on an exception rolls back pending changes. The returned session is
  closed on final scope exit.

  Args:
    engine: sqlalchemy engine

  Returns:
    Session
  '''
  session = sessionmaker(bind=engine)()
  try:
    yield session
    session.commit()
  except BaseException:
    # previously a bare `except:`; BaseException keeps the same semantics
    # (roll back on *anything*, including KeyboardInterrupt) while
    # satisfying lint rules, then re-raises the original exception
    session.rollback()
    raise
  finally:
    session.close()
def generate_container_spec(
    session: Session,
    docker_args: Dict[str, Any],
    image_tag: Optional[str] = None,
) -> ContainerSpec:
  '''generates a container spec

  Args:
    session: sqlalchemy session
    docker_args: args for building docker container
    image_tag: if not None, then an existing docker image is used

  Returns:
    ContainerSpec instance
  '''
  # An explicit image tag short-circuits the docker build: the spec then
  # records only the id of the prebuilt image.
  spec = docker_args if image_tag is None else {'image_id': image_tag}
  return ContainerSpec.get_or_create(session=session, spec=spec)
def create_experiments(
    session: Session,
    container_spec: ContainerSpec,
    script_args: List[str],
    experiment_config: ce.ExpConf,
    xgroup: Optional[str] = None,
) -> List[Experiment]:
  '''create experiment instances

  Args:
    session: sqlalchemy session
    container_spec: container spec for the generated experiments
    script_args: these are extra arguments that will be passed to every job
      executed, in addition to the arguments created by expanding out the
      experiment config.
    experiment_config: dict of string to list, boolean, string or int. Any
      lists will trigger a cartesian product out with the rest of the config. A
      job will be submitted for every combination of parameters in the experiment
      config.
    xgroup: experiment group name for the generated experiments
  '''
  group = ExperimentGroup.get_or_create(session=session, name=xgroup)
  # adding the group to the session ensures that any new objects it pulls
  # in get persisted
  session.add(group)
  return [
      Experiment.get_or_create(
          xgroup=group,
          container_spec=container_spec,
          args=script_args,
          kwargs=kw,
      ) for kw in ce.expand_experiment_config(experiment_config)
  ]
# ----------------------------------------------------------------------------
def _get_caip_job_name(j: Job) -> str:
  '''gets job name for use with caip rest api'''
  # the rest api addresses jobs as projects/<project>/jobs/<job id>
  return 'projects/{}/jobs/{}'.format(j.details['project_id'],
                                      j.details['jobId'])
# ----------------------------------------------------------------------------
def _get_caip_job_api(credentials_path: Optional[str] = None) -> Any:
  '''returns the jobs endpoint of the AI Platform (ml/v1) discovery api'''
  creds = ua.gcloud_credentials(credentials_path)
  ml = discovery.build('ml', 'v1', cache_discovery=False, credentials=creds)
  return ml.projects().jobs()
# ----------------------------------------------------------------------------
def get_caip_job_status(j: Job) -> JobStatus:
  '''gets caip job status

  https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#State

  Args:
    j: job whose remote state should be queried

  Returns:
    JobStatus (UNKNOWN if the remote query fails or the state is unmapped)
  '''
  # mapping from CAIP job states to caliban's platform-neutral statuses
  CAIP_TO_JOB_STATUS = {
      CloudStatus.STATE_UNSPECIFIED: JobStatus.UNKNOWN,
      CloudStatus.QUEUED: JobStatus.SUBMITTED,
      CloudStatus.PREPARING: JobStatus.SUBMITTED,
      CloudStatus.RUNNING: JobStatus.RUNNING,
      CloudStatus.SUCCEEDED: JobStatus.SUCCEEDED,
      CloudStatus.FAILED: JobStatus.FAILED,
      CloudStatus.CANCELLING: JobStatus.RUNNING,
      CloudStatus.CANCELLED: JobStatus.STOPPED
  }
  api = _get_caip_job_api()
  job_id = j.details['jobId']
  name = _get_caip_job_name(j)
  try:
    rsp = api.get(name=name).execute()
    caip_status = CloudStatus[rsp['state']]
  except Exception as e:
    # previously the caught exception was silently dropped from the log
    logging.error(f'error getting job status for {job_id}: {e}')
    return JobStatus.UNKNOWN
  return CAIP_TO_JOB_STATUS.get(caip_status, JobStatus.UNKNOWN)
# ----------------------------------------------------------------------------
def get_gke_job_name(j: Job) -> str:
  '''gets gke job name from Job object'''
  # the kubernetes job name lives in the stored job manifest's metadata
  metadata = j.details['job']['metadata']
  return metadata['name']
# ----------------------------------------------------------------------------
def get_job_cluster(j: Job) -> Optional[Cluster]:
  '''gets the cluster associated with a Job object, or None for non-GKE jobs'''
  # only GKE jobs carry cluster details
  if j.spec.platform != Platform.GKE:
    return None
  details = j.details
  return Cluster.get(name=details['cluster_name'],
                     project_id=details['project_id'],
                     zone=details['cluster_zone'],
                     creds=default_credentials().credentials)
# ----------------------------------------------------------------------------
def get_gke_job_status(j: Job) -> JobStatus:
  '''get gke job status

  see:
  https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#jobcondition-v1-batch

  Returns:
    JobStatus
  '''
  # mapping from GKE job states to caliban's platform-neutral statuses
  GKE_TO_JOB_STATUS = {
      GkeStatus.STATE_UNSPECIFIED: JobStatus.SUBMITTED,
      GkeStatus.PENDING: JobStatus.SUBMITTED,
      GkeStatus.RUNNING: JobStatus.RUNNING,
      GkeStatus.FAILED: JobStatus.FAILED,
      GkeStatus.SUCCEEDED: JobStatus.SUCCEEDED,
      GkeStatus.UNAVAILABLE: JobStatus.UNKNOWN
  }
  cname = j.details['cluster_name']
  jname = get_gke_job_name(j)
  cluster = get_job_cluster(j)
  # without a cluster connection there is nothing to query
  if cluster is None:
    logging.error(f'unable to connect to cluster {cname}, '
                  f'so unable to update run status')
    return JobStatus.UNKNOWN
  info = cluster.get_job(jname)
  if info is None:
    logging.error(f'unable to get job info from cluster {cname}, '
                  f'so unable to update run status')
    return JobStatus.UNKNOWN
  return GKE_TO_JOB_STATUS[GkeStatus.from_job_info(info)]
# ----------------------------------------------------------------------------
def update_job_status(j: Job) -> JobStatus:
  '''updates and returns job status

  Returns:
    current status for this job
  '''
  # terminal states never change, and local jobs run to completion, so in
  # both cases the stored status is already final
  if j.status is not None and j.status.is_terminal():
    return j.status
  if j.spec.platform == Platform.LOCAL:
    return j.status
  if j.spec.platform == Platform.CAIP:
    j.status = get_caip_job_status(j)
    return j.status
  if j.spec.platform == Platform.GKE:
    j.status = get_gke_job_status(j)
    return j.status
  # BUG FIX: the message was a plain string with literal braces and
  # referenced the nonexistent attribute j.platform
  assert False, f"can't get job status for platform {j.spec.platform.name}"
# ----------------------------------------------------------------------------
def _stop_caip_job(j: Job) -> bool:
  '''stops a running caip job

  see:
  https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs/cancel

  Args:
    j: job to stop

  Returns:
    True on success, False otherwise
  '''
  api = _get_caip_job_api()
  name = _get_caip_job_name(j)
  try:
    rsp = api.cancel(name=name).execute()
  except Exception as e:
    # BUG FIX: these log messages were plain strings, so '{name}'/'{e}'
    # appeared literally in the log; they are now real f-strings
    logging.error(f'error stopping CAIP job {name}: {e}')
    return False
  if rsp != {}:
    # a successful cancel returns an empty response body
    # (BUG FIX: the old message referenced an undefined name `pp`)
    logging.error(f'error stopping CAIP job {name}: {rsp}')
    return False
  return True
# ----------------------------------------------------------------------------
def _stop_gke_job(j: Job) -> bool:
  '''stops a running gke job

  see:
  https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#delete-job-v1-batch

  Args:
    j: job to stop

  Returns:
    True on success, False otherwise
  '''
  cname = j.details['cluster_name']
  jname = get_gke_job_name(j)
  cluster = get_job_cluster(j)
  if cluster is None:
    logging.error(f'unable to connect to cluster {cname}, '
                  f'so unable to delete job {jname}')
    return False
  deleted = cluster.delete_job(job_name=jname)
  # gke deletes the job completely, so we can't then query its status later
  # thus if the request went through ok, then we mark as stopped
  if deleted:
    j.status = JobStatus.STOPPED
  return deleted
# ----------------------------------------------------------------------------
def stop_job(j: Job) -> bool:
  '''stops a running job

  Args:
    j: job to stop

  Returns:
    True on success, False otherwise
  '''
  # only jobs that are still in flight need any action
  if update_job_status(j) not in (JobStatus.RUNNING, JobStatus.SUBMITTED):
    return True
  platform = j.spec.platform
  if platform == Platform.LOCAL:
    # local jobs run to completion
    return True
  if platform == Platform.CAIP:
    return _stop_caip_job(j)
  if platform == Platform.GKE:
    return _stop_gke_job(j)
  return False
# ----------------------------------------------------------------------------
def replace_local_job_spec_image(spec: JobSpec, image_id: str) -> JobSpec:
  '''generates a new JobSpec based on an existing one, but replacing the
  image id

  Args:
    spec: job spec used as basis
    image_id: new image id

  Returns:
    new JobSpec
  '''
  # every occurrence of the old image id in the command line is swapped
  # for the new one
  old_image = spec.spec['container']
  new_cmd = [image_id if tok == old_image else tok
             for tok in spec.spec['command']]
  return JobSpec.get_or_create(
      experiment=spec.experiment,
      spec={
          'command': new_cmd,
          'container': image_id,
      },
      platform=Platform.LOCAL,
  )
# ----------------------------------------------------------------------------
def replace_caip_job_spec_image(spec: JobSpec, image_id: str) -> JobSpec:
  '''generates a new JobSpec based on an existing one, but replacing the
  image id

  Args:
    spec: job spec used as basis
    image_id: new image id

  Returns:
    new JobSpec
  '''
  # deep-copy so the original spec dict is left untouched
  updated = deepcopy(spec.spec)
  updated['trainingInput']['masterConfig']['imageUri'] = image_id
  return JobSpec.get_or_create(
      experiment=spec.experiment,
      spec=updated,
      platform=Platform.CAIP,
  )
# ----------------------------------------------------------------------------
def replace_gke_job_spec_image(spec: JobSpec, image_id: str) -> JobSpec:
  '''generates a new JobSpec based on an existing one, but replacing the
  image id

  Args:
    spec: job spec used as basis
    image_id: new image id

  Returns:
    new JobSpec
  '''
  # deep-copy so the original spec dict is left untouched
  new_spec = deepcopy(spec.spec)
  # retag every container in the pod template
  # (a stray leftover-debug bare `print` statement was removed here, and the
  # range(len(...)) index loop replaced with direct iteration)
  for container in new_spec['template']['spec']['containers']:
    container['image'] = image_id
  return JobSpec.get_or_create(
      experiment=spec.experiment,
      spec=new_spec,
      platform=Platform.GKE,
  )
# ----------------------------------------------------------------------------
def replace_job_spec_image(spec: JobSpec, image_id: str) -> JobSpec:
  '''generates a new JobSpec based on an existing one, but replacing the
  image id

  Args:
    spec: job spec used as basis
    image_id: new image id

  Returns:
    new JobSpec (None for an unrecognized platform)
  '''
  # dispatch on platform; unknown platforms yield None, as before
  handlers = {
      Platform.LOCAL: replace_local_job_spec_image,
      Platform.CAIP: replace_caip_job_spec_image,
      Platform.GKE: replace_gke_job_spec_image,
  }
  handler = handlers.get(spec.platform)
  if handler is None:
    return None
  return handler(spec=spec, image_id=image_id)
| {
"content_hash": "fa568b8967c0181c97666bd5ff7774dc",
"timestamp": "",
"source": "github",
"line_count": 541,
"max_line_length": 98,
"avg_line_length": 28.752310536044362,
"alnum_prop": 0.6124718739954998,
"repo_name": "google/caliban",
"id": "f414d192bd1ce8575abe95445c5560b5ca9ed20a",
"size": "16151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caliban/history/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3885"
},
{
"name": "Makefile",
"bytes": "1478"
},
{
"name": "Python",
"bytes": "547359"
},
{
"name": "Shell",
"bytes": "3071"
},
{
"name": "TeX",
"bytes": "3557"
}
],
"symlink_target": ""
} |
import os.path
import time
from cmsplugin_feed.processors import FEED_PROCESSORS
def test_image_hrefs(parsed_feed):
assert len(parsed_feed) != 0
entries = parsed_feed.get("entries")
for en in entries:
# it can be unicode
if "image" in en:
assert isinstance(en.get("image"), basestring)
def test_invalid_feed():
    """Fetching a malformed feed file must not raise an exception."""
    import cmsplugin_feed.cms_plugins
    # fixture path is relative to the test working directory
    invalid_path = os.path.join(".", "fixtures", "invalid.xml")
    feed = cmsplugin_feed.cms_plugins.fetch_parsed_feed(invalid_path)
    # reaching this line at all is the real assertion: no exception raised
    assert True, "No exception has been raised"
def test_process_real_feed():
    """
    Test that feed processors can work with real case of missing data.
    """
    # Fixture captured from a real RSS feed; note it carries no 'image',
    # 'media_thumbnail', 'media_content', 'content' or 'summary' keys,
    # which every processor must tolerate.
    feed = {'entries': [{
        'links': [{
            'href': u'http://listen.sdpb.org/post/6-pm-ct5-pm-mt-newscast-1',
            'type': u'text/html',
            'rel': u'alternate'
        }],
        'title': u'6 pm CT/5 pm MT Newscast',
        'author': u'Susan Hanson',
        'guidislink': False,
        'title_detail': {
            'base': u'http://listen.sdpb.org/news/rss.xml',
            'type': u'text/plain',
            'value': u'6 pm CT/5 pm MT Newscast',
            'language': None
        },
        'link': u'http://listen.sdpb.org/post/6-pm-ct5-pm-mt-newscast-1',
        'authors': [{'name': u'Susan Hanson'}],
        'author_detail': {'name': u'Susan Hanson'},
        'id': u'101789 as http://listen.sdpb.org',
        'published': u'Fri, 14 Oct 2016 23:14:19 +0000'
    }]}
    # Running the full processor pipeline is the assertion: none may raise.
    for processor in FEED_PROCESSORS:
        feed = processor(feed)
def test_process_missing_info():
    """
    Test that feed processors can work with missing data without raising errors.
    """
    # malformed entries, each wrapped into a feed of its own below
    malformed_entries = [
        {},
        {'image': 1},
        {'image': 'link'},
        {'image': {}},
        {'image': {}, 'media_thumbnail': 1},
        {'image': {}, 'media_content': 2},
        {'links': {}},
        {'links': 'missing'},
        {'links': 2},
        {'links': [{}, ]},
        {'links': [{'type': 3}, ]},
        {'links': [{'type': 'image/jpeg'}, ]},
        {'content': 3},
        {'content': 'bla', 'summary': '<img>'},
    ]
    # structurally broken feeds plus one wrapper feed per malformed entry
    cases = [
        {},
        {'entries': []},
        {'entries': [2]},
        {'entries': ['3']},
    ]
    cases.extend({'entries': [entry]} for entry in malformed_entries)
    # running the whole pipeline is the assertion: nothing may raise
    for case in cases:
        for processor in FEED_PROCESSORS:
            case = processor(case)
| {
"content_hash": "0ba845301d0207664511af0fbc25b94e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 80,
"avg_line_length": 30.023809523809526,
"alnum_prop": 0.5313243457573354,
"repo_name": "pbs/cmsplugin-feed",
"id": "2f7e78f336bf192a69057696fce35bda0384974a",
"size": "2522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_feed/tests/test_processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "973"
},
{
"name": "HTML",
"bytes": "1702"
},
{
"name": "Python",
"bytes": "21620"
}
],
"symlink_target": ""
} |
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from quantum.db.models_v2 import model_base
from quantum.db import model_base
from quantum.db.model_base import BASEV2
class RuijieVlanAllocation(model_base.BASEV2):
    """Represents allocation state of vlan_id on physical network"""
    __tablename__ = 'ruijie_vlan_allocations'

    physical_network = Column(String(64), nullable=False, primary_key=True)
    vlan_id = Column(Integer, nullable=False, primary_key=True,
                     autoincrement=False)
    allocated = Column(Boolean, nullable=False)

    def __init__(self, physical_network, vlan_id):
        # a freshly created row always starts out unallocated
        self.physical_network = physical_network
        self.vlan_id = vlan_id
        self.allocated = False

    def __repr__(self):
        return "<VlanAllocation({0},{1:d},{2})>".format(
            self.physical_network, self.vlan_id, self.allocated)
class RuijieNetworkBinding(model_base.BASEV2):
    """Represents binding of virtual network to physical realization"""
    __tablename__ = 'ruijie_network_bindings'

    network_id = Column(String(36),
                        ForeignKey('networks.id', ondelete="CASCADE"),
                        primary_key=True)
    network_type = Column(String(32), nullable=False)
    physical_network = Column(String(64))
    segmentation_id = Column(Integer)  # vlan_id

    def __init__(self, network_id, network_type, physical_network,
                 segmentation_id):
        self.network_id = network_id
        self.network_type = network_type
        self.physical_network = physical_network
        self.segmentation_id = segmentation_id

    def __repr__(self):
        # NOTE(review): %d raises TypeError if segmentation_id is None
        # (the column is nullable) -- confirm callers never repr such rows
        return "<NetworkBinding({0},{1},{2},{3:d})>".format(
            self.network_id, self.network_type, self.physical_network,
            self.segmentation_id)
class RuijieSwitchEthBinding(BASEV2):
    """Represents a binding of Ruijie switch ip, Ruijie switch port id, Ruijie lldp neighbor MAC"""
    __tablename__ = 'ruijie_switch_eth_bindings'
    __table_args__ = {'extend_existing': True}

    ip_address = Column(String(255), primary_key=True)
    mac_address = Column(String(255), primary_key=True)
    port_id = Column(String(255), primary_key=True)

    def __init__(self, ip, mac, port):
        self.ip_address = ip
        self.mac_address = mac
        self.port_id = port

    def __repr__(self):
        return "<RuijieSwitchEthBinding({0},{1},{2})>".format(
            self.ip_address, self.mac_address, self.port_id)
class RuijieVmEthBinding(BASEV2):
    """Represents a binding of vm and network card"""
    __tablename__ = 'ruijie_vm_eth_bindings'
    __table_args__ = {'extend_existing': True}

    intf_uuid = Column(String(255), primary_key=True)
    mac_address = Column(String(255), primary_key=True)

    # NOTE(review): parameter `id` shadows the builtin; kept for
    # backward compatibility with keyword callers
    def __init__(self, id, mac):
        self.intf_uuid = id
        self.mac_address = mac

    def __repr__(self):
        return "<RuijieVmEthBinding({0},{1})>".format(
            self.intf_uuid, self.mac_address)
class RuijieVlanBinding(BASEV2):
    """Represents a ruijie vlan binding"""
    __tablename__ = 'ruijie_vlan_bindings'
    __table_args__ = {'extend_existing': True}

    ip_address = Column(String(255), primary_key=True)
    port_id = Column(String(255), primary_key=True)
    vlan_id = Column(String(255), primary_key=True)
    intf_uuid = Column(String(255), primary_key=True)

    def __init__(self, ip, port, vlan, uuid):
        self.ip_address = ip
        self.port_id = port
        self.vlan_id = vlan
        self.intf_uuid = uuid

    def __repr__(self):
        return "<RuijieVlanBinding({0},{1},{2},{3})>".format(
            self.ip_address, self.port_id, self.vlan_id, self.intf_uuid)
class RuijieSwitchSshHostConfig(BASEV2):
    """SSH endpoint (address/port) plus retry policy for a Ruijie switch."""

    __tablename__ = 'ruijie_switch_ssh_host_config'
    __table_args__ = {'extend_existing': True}

    host_id = Column(Integer, primary_key=True)
    ip_address = Column(String(255), primary_key=True)
    port_id = Column(String(255), primary_key=True)
    retry_times = Column(Integer)
    reconnect_time = Column(Integer)

    def __init__(self, id, ip, port, retry, recont):
        self.host_id = id
        self.ip_address = ip
        self.port_id = port
        self.retry_times = retry
        self.reconnect_time = recont

    def __repr__(self):
        values = (self.host_id, self.ip_address, self.port_id,
                  self.retry_times, self.reconnect_time)
        return "<RuijieSwitchSshHostConfig(%s,%s,%s,%s,%s)>" % values
class RuijieSwitchSshAuthConfig(BASEV2):
    """SSH credentials for a Ruijie switch.

    NOTE(review): the password column is plain text and __repr__ echoes it,
    so it can end up in logs -- confirm this is acceptable for deployments.
    """

    __tablename__ = 'ruijie_switch_ssh_author_config'
    __table_args__ = {'extend_existing': True}

    host_id = Column(Integer, primary_key=True)
    username = Column(String(255), primary_key=True)
    password = Column(String(255), primary_key=True)

    def __init__(self, id, user, passwd):
        self.host_id, self.username, self.password = id, user, passwd

    def __repr__(self):
        return "<RuijieSwitchSshAuthConfig(%s,%s,%s)>" % (
            self.host_id, self.username, self.password)
| {
"content_hash": "cb4834871adb1f659a50ca989f418db5",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 99,
"avg_line_length": 38.06206896551724,
"alnum_prop": 0.5915926798333031,
"repo_name": "ruijie/quantum",
"id": "ba5ea6336b45e920affd4857ebf346e20423b6e9",
"size": "6247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/plugins/rgos/db/rgos_models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "71602"
},
{
"name": "Perl",
"bytes": "36750"
},
{
"name": "Python",
"bytes": "2684560"
},
{
"name": "Racket",
"bytes": "143"
},
{
"name": "Shell",
"bytes": "8432"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import Iterable
from pants.backend.helm.resolve import artifacts
from pants.backend.helm.resolve.artifacts import ThirdPartyHelmArtifactMapping
from pants.backend.helm.subsystems.helm import HelmSubsystem
from pants.backend.helm.target_types import (
AllHelmChartTargets,
HelmChartDependenciesField,
HelmChartMetaSourceField,
HelmChartTarget,
)
from pants.backend.helm.target_types import rules as helm_target_types_rules
from pants.backend.helm.util_rules import chart_metadata
from pants.backend.helm.util_rules.chart_metadata import HelmChartDependency, HelmChartMetadata
from pants.engine.addresses import Address
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, rule
from pants.engine.target import (
DependenciesRequest,
ExplicitlyProvidedDependencies,
FieldSet,
InferDependenciesRequest,
InferredDependencies,
)
from pants.engine.unions import UnionRule
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.ordered_set import OrderedSet
from pants.util.strutil import bullet_list, pluralize
logger = logging.getLogger(__name__)
class DuplicateHelmChartNamesFound(Exception):
    """Raised when two or more `helm_chart` targets share a chart name."""

    def __init__(self, duplicates: Iterable[tuple[str, Address]]) -> None:
        listing = bullet_list([f"{addr} -> {name}" for name, addr in duplicates])
        super().__init__(
            f"Found more than one `{HelmChartTarget.alias}` target using the same chart name:\n\n"
            f"{listing}"
        )
class UnknownHelmChartDependency(Exception):
    """Raised when a `Chart.yaml` dependency matches no known artifact."""

    def __init__(self, address: Address, dependency: HelmChartDependency) -> None:
        message = (
            f"Can not find any declared artifact for dependency '{dependency.name}' "
            f"declared at `Chart.yaml` in Helm chart at address: {address}"
        )
        super().__init__(message)
class FirstPartyHelmChartMapping(FrozenDict[str, Address]):
    """Maps first-party chart names to the address of their `helm_chart` target."""
    pass
@rule
async def first_party_helm_chart_mapping(
    all_helm_chart_tgts: AllHelmChartTargets,
) -> FirstPartyHelmChartMapping:
    """Build a chart-name -> target-address mapping for first-party charts.

    Raises `DuplicateHelmChartNamesFound` if two targets declare the same
    chart name in their `Chart.yaml`.
    """
    charts_metadata = await MultiGet(
        Get(HelmChartMetadata, HelmChartMetaSourceField, tgt[HelmChartMetaSourceField])
        for tgt in all_helm_chart_tgts
    )

    name_addr_mapping: dict[str, Address] = {}
    duplicate_chart_names: OrderedSet[tuple[str, Address]] = OrderedSet()
    # MultiGet preserves input order, so zip pairs each metadata with its target.
    for meta, tgt in zip(charts_metadata, all_helm_chart_tgts):
        if meta.name in name_addr_mapping:
            # Record both offenders: the previously seen address and this one.
            duplicate_chart_names.add((meta.name, name_addr_mapping[meta.name]))
            duplicate_chart_names.add((meta.name, tgt.address))
            continue
        name_addr_mapping[meta.name] = tgt.address

    if duplicate_chart_names:
        raise DuplicateHelmChartNamesFound(duplicate_chart_names)

    return FirstPartyHelmChartMapping(name_addr_mapping)
@dataclass(frozen=True)
class HelmChartDependenciesInferenceFieldSet(FieldSet):
    """FieldSet consumed when inferring dependencies of a `helm_chart` target."""

    required_fields = (HelmChartMetaSourceField, HelmChartDependenciesField)

    source: HelmChartMetaSourceField
    dependencies: HelmChartDependenciesField
class InferHelmChartDependenciesRequest(InferDependenciesRequest):
    """Request to infer a `helm_chart` target's dependencies from Chart.yaml."""
    infer_from = HelmChartDependenciesInferenceFieldSet
@rule(desc="Inferring Helm chart dependencies", level=LogLevel.DEBUG)
async def infer_chart_dependencies_via_metadata(
    request: InferHelmChartDependenciesRequest,
    first_party_mapping: FirstPartyHelmChartMapping,
    third_party_mapping: ThirdPartyHelmArtifactMapping,
    subsystem: HelmSubsystem,
) -> InferredDependencies:
    """Infer a `helm_chart` target's dependencies from its `Chart.yaml`.

    Each dependency entry is matched against first-party chart names and
    third-party artifact URLs. Candidates already covered by explicitly
    provided dependencies are dropped; a dependency with no candidate at
    all raises `UnknownHelmChartDependency`.
    """
    address = request.field_set.address

    # Parse Chart.yaml for explicitly set dependencies.
    explicitly_provided_deps, metadata = await MultiGet(
        Get(ExplicitlyProvidedDependencies, DependenciesRequest(request.field_set.dependencies)),
        Get(HelmChartMetadata, HelmChartMetaSourceField, request.field_set.source),
    )

    remotes = subsystem.remotes()

    def resolve_dependency_url(dependency: HelmChartDependency) -> str | None:
        # Without an explicit repository, fall back to the default registry
        # (if configured); otherwise the dependency has no resolvable URL.
        if not dependency.repository:
            registry = remotes.default_registry
            if registry:
                return f"{registry.address}/{dependency.name}"
            return None
        else:
            return f"{dependency.repository}/{dependency.name}"

    # Associate dependencies in Chart.yaml with addresses.
    dependencies: OrderedSet[Address] = OrderedSet()
    for chart_dep in metadata.dependencies:
        candidate_addrs = []

        first_party_dep = first_party_mapping.get(chart_dep.name)
        if first_party_dep:
            candidate_addrs.append(first_party_dep)

        dependency_url = resolve_dependency_url(chart_dep)
        third_party_dep = third_party_mapping.get(dependency_url) if dependency_url else None
        if third_party_dep:
            candidate_addrs.append(third_party_dep)

        if not candidate_addrs:
            raise UnknownHelmChartDependency(address, chart_dep)

        # Warn when more than one candidate remains after removing the
        # explicitly declared addresses; only an unambiguous match is added.
        matches = frozenset(candidate_addrs).difference(explicitly_provided_deps.includes)
        explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
            matches,
            address,
            context=f"The Helm chart {address} declares `{chart_dep.name}` as dependency",
            import_reference="helm dependency",
        )

        maybe_disambiguated = explicitly_provided_deps.disambiguated(matches)
        if maybe_disambiguated:
            dependencies.add(maybe_disambiguated)

    logger.debug(
        f"Inferred {pluralize(len(dependencies), 'dependency')} for target at address: {address}"
    )
    return InferredDependencies(dependencies)
def rules():
    """Return every rule needed for Helm chart dependency inference."""
    all_rules = list(collect_rules())
    all_rules.extend(artifacts.rules())
    all_rules.extend(helm_target_types_rules())
    all_rules.extend(chart_metadata.rules())
    all_rules.append(
        UnionRule(InferDependenciesRequest, InferHelmChartDependenciesRequest))
    return all_rules
| {
"content_hash": "d8442f6b2ab5e560f884fe5788961800",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 98,
"avg_line_length": 36.03636363636364,
"alnum_prop": 0.7208207198116381,
"repo_name": "pantsbuild/pants",
"id": "bf5de0b32bb448a7e04c7464a7f41982d4554c62",
"size": "6078",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/helm/dependency_inference/chart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Dockerfile",
"bytes": "1132"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "97190"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3771"
},
{
"name": "Python",
"bytes": "7582858"
},
{
"name": "Rust",
"bytes": "1657282"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31400"
},
{
"name": "Starlark",
"bytes": "76892"
}
],
"symlink_target": ""
} |
from __future__ import division
import sys
import pandas as pd
import numpy as np
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import itertools
import os
import statsmodels.api as sm
import matplotlib.pyplot as plt
import statsmodels
import fnmatch
import seaborn as sns
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from sklearn.preprocessing import normalize
import glob
import time
import warnings
# Use os.path.expanduser so relative paths resolve correctly across machines.
os.chdir(os.path.expanduser('~/Dropbox/modmap/behavior/'))
out_path = os.path.expanduser('~/Dropbox/modmap/analysis/')

# Good housekeeping: delete the stale figures that are about to be updated.
for svg in glob.glob(os.path.expanduser('~/Dropbox/modmap/analysis/*.svg')):
    os.remove(svg)

# Initialize common dictionaries for the group-level analyses.
# These are later concatenated into dataframes; dicts are used because each
# subject has a variable number of training days.
group_rts = {}
group_acc = {}
group_acfs = {}
group_dict = {'r': 'cue', 'c': 'response'}
group_set_acfs = {}
# Loop through the two groups ('r' and 'c') in the study.
for group in ['r', 'c']:
    # Generate a list of the subjects currently in the study from the
    # per-group summary CSV files in the working directory.
    summary_files = []
    for file in os.listdir('.'):
        if fnmatch.fnmatch(file, '*summary*' + group + '.csv'):
            summary_files.append(file)
    subs = []
    for i in range(0, len(summary_files)):
        # The first four characters of a summary file name are the subject id.
        subs.append(summary_files[i][:4])
    uniqueSubs = list(set(subs))
    # First loop through subjects and generate individual summary figures.
    for sub in uniqueSubs:
        # Initialize the per-subject dataframes (one row per training day).
        Acc = pd.DataFrame(columns=('Day', 'randAcc', 'seqAcc'))
        RT = pd.DataFrame(columns=('Day', 'zscoredRT', 'duration'))
        sdRT = pd.DataFrame(columns=('Day', 'sdRTseq', 'sdRTrand', 'sdRatio'))
        lag_names = ['lag' + str(i) for i in range(1, 16)]
        # NOTE(review): chunkSizeSeq/chunkSizeRand are never populated or
        # plotted below -- presumably leftovers; confirm before removing.
        chunkSizeSeq = pd.DataFrame(columns=('Day', 'chunkSize'))
        chunkSizeRand = pd.DataFrame(columns=('Day', 'chunkSize'))
        randLags = pd.DataFrame(columns=lag_names)
        seqLags = pd.DataFrame(columns=lag_names)
        df = pd.DataFrame()
        # Find this subject's summary data files.
        sub_files = []
        for file in os.listdir('.'):
            if fnmatch.fnmatch(file, sub + '*summary*' + '.csv'):
                sub_files.append(file)
# populate the dataframes containing each summary statistic for each day
for day in sub_files:
df = pd.read_csv(day)
this_day = int(day[day.find('session')+8:-12])
Acc.loc[this_day] = [this_day,df['accuracy'][5], df['accuracy'][6]]
zscoreRT = (df['rt_all'][5] - df['rt_all'][6])/df['sdRT'][5]
RT.loc[this_day] = [this_day, zscoreRT, df['rt_all'][6]]
sdRT.loc[this_day] = [this_day, df['sdRT'][6],df['sdRT'][5], df['sdRT'][6]/df['sdRT'][5]]
randLags.loc[this_day] = df[lag_names].loc[6]
seqLags.loc[this_day] = df[lag_names].loc[6]
# Sort so that dataframes are ordered by day
randLags = randLags.sort_index(axis=0)
seqLags = seqLags.sort_index(axis=0)
# Setting the context in this way will make your figure font size appear properly on a standard paper e.g for a journal submission.
sns.set_context(context='paper', font_scale=2.0)
# this is the setting you want
sns.set_style("white", {'axes.linewidth':0.0000001, 'axes.edgecolor':'black'})
#Set up one big figure for each panel and then add subplots to that figure (Panel A, B and so on)
fig = plt.figure(figsize=(8,12))
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
#Generate Accuracy Plots for the Sequence
plt.subplot(331) # Specify which subplot to write to
Acc.sort_values(by=['Day'], ascending = [True], inplace=True)
sns.regplot('Day', 'seqAcc',data=Acc, fit_reg=False, ax=ax1, scatter_kws={'s':40})
plt.axis([1,10,.5,1])
plt.xticks(np.arange(1,10.1,1))
plt.ylabel('Accuracy')
plt.xlabel('Day')
plt.title('(a)', loc='left', y = 1.1, x = -0.35)
plt.grid(linestyle='dotted')
#Generate RT plots
plt.subplot(332)
RT.sort_values(by=['Day'], ascending = [True], inplace=True)
sns.regplot('Day', 'zscoredRT',data=RT, fit_reg=False, ax=ax2, scatter_kws={'s':40})
plt.axis([1,10,0,6])
plt.xticks(np.arange(1,10.1,1))
plt.ylabel('Reaction Time (z-units)')
plt.xlabel('Day')
plt.title('(b)', loc='left',y = 1.1, x = -0.35)
plt.grid(linestyle='dotted')
# Skill plot
plt.subplot(333) #
Acc['errorRate'] = 1 - Acc['seqAcc']
Acc.loc[Acc.errorRate == 0, 'errorRate'] = 0.01
Acc['skill'] = 10*(1-Acc['errorRate'])/(Acc['errorRate']*np.log(1000*RT['duration'])**5)
RT.sort_values(by=['Day'], ascending = [True], inplace=True)
sns.regplot('Day', 'skill',data=Acc, fit_reg=False, ax=ax3, scatter_kws={'s':40})
plt.axis([1,10,-300,200])
plt.xticks(np.arange(1,10.1,1))
plt.ylabel('Skill')
plt.xlabel('Day')
plt.title('(c)', loc='left',y = 1.1, x = -0.35)
plt.grid(linestyle='dotted')
        # Add wide subplots for the autocorrelation panels.
        # NOTE(review): the title '(c)' is reused below although panel (c)
        # above is the skill plot -- likely meant to be '(d)'/'(e)'.
        fig.add_subplot(312)
        fig.add_subplot(313)
        plt.subplot(312)
        blues = plt.get_cmap('Blues')
        # Autocorrelation for random trials: one line per day, darker = later.
        for i in range(1, len(randLags) + 1):
            colorBlue = blues(.05 + float(i) / (len(randLags) + 1))
            plt.plot(range(1, 16), randLags.loc[i], color=colorBlue,
                     label='Day ' + str(i))
        # Shrink the axes so the legend fits beside the plot area.
        box = plt.gca().get_position()
        plt.gca().set_position([box.x0, box.y0, box.width * 0.8, box.height])
        plt.xlabel('Lag (Trials)')
        plt.ylabel('Autocorrelation')
        legend = plt.legend(loc='upper right', ncol=1, prop={'size': 8},
                            title='Training Day')
        plt.setp(legend.get_title(), fontsize='xx-small')
        plt.title('(c)', y=0.9, loc='left', x=-0.15)
        plt.axis([1, 15, -0.5, .75])
        plt.xticks(np.arange(1, 16, 1))
        plt.yticks(np.arange(-.5, .76, 0.25))
        plt.grid(linestyle='dotted')
        # Same plot for sequence trials, in greens.
        plt.subplot(313)
        greens = plt.get_cmap('Greens')
        for i in range(1, len(randLags) + 1):
            colorGreen = greens(.05 + float(i) / (len(randLags) + 1))
            plt.plot(range(1, 16), seqLags.loc[i], color=colorGreen,
                     label='Day ' + str(i))
        box = plt.gca().get_position()
        plt.gca().set_position([box.x0, box.y0, box.width * 0.8, box.height])
        plt.xlabel('Lag (Trials)')
        plt.ylabel('Autocorrelation')
        legend = plt.legend(loc='upper right', ncol=1, prop={'size': 8},
                            title='Training Day')
        plt.setp(legend.get_title(), fontsize='xx-small')
        plt.title('(d)', y=0.9, loc='left', x=-0.15)
        plt.axis([1, 15, -0.5, .75])
        plt.xticks(np.arange(1, 16, 1))
        plt.yticks(np.arange(-.5, .76, 0.25))
        plt.grid(linestyle='dotted')
        # Save this subject's figure. `day` is whatever file the inner loop
        # ended on; day[:4] is the subject-id prefix of the file name.
        ind_plot_fn = out_path + day[:4] + '_summary_Days1-' + str(len(Acc)) + '.svg'
        plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
        # NOTE(review): savefig has no 'rasterize' kwarg ('rasterized' is an
        # artist property) -- confirm this argument actually has an effect.
        fig.savefig(ind_plot_fn, rasterize=True)
        plt.close('all')
#Generate group plots:
seqLags['sid']= day[:4]
seqLags['group']= group_dict[group]
RT['sid'] = day[:4]
RT['group'] = group_dict[group]
Acc['sid'] = day[:4]
Acc['group'] = group_dict[group]
group_acfs[day[:4]] = seqLags
group_rts[day[:4]] = RT
group_acc[day[:4]] = Acc
first_set_acfs = RT.copy()
del first_set_acfs['zscoredRT']
first_set_acfs['firstset']=seqLags[['lag1', 'lag2', 'lag3']].mean(axis=1)
first_set_acfs['seq_index']=seqLags[['lag4']]
first_set_acfs['group'] = group_dict[group]
group_set_acfs[day[:4]] = first_set_acfs
# Now generate the up-to-date group summary figures.
# NOTE(review): assumed to run at top level after both groups are processed;
# indentation was lost in transit -- confirm against the original file.
sns.set_context(context='paper', font_scale=2.0)
sns.set_style("white", {'axes.linewidth': 0.0001, 'axes.edgecolor': 'black'})
fig = plt.figure(figsize=(8, 15))
# Group accuracy boxplots, one box per day and group.
plt.subplot(511)
result = pd.concat(group_acc.values())
result['Day'] = result.index
# (Removed a stray `result.drop([])`: DataFrame.drop is not in-place and its
# return value was discarded, so the call was a pure no-op copy.)
df = pd.melt(result, id_vars=['Day', 'sid', 'group'], value_vars='seqAcc')
with warnings.catch_warnings():
    # Seaborn can emit RuntimeWarnings on all-NaN slices; silence them here.
    warnings.simplefilter("ignore", category=RuntimeWarning)
    ax = sns.boxplot(x='Day', y='value', hue='group', data=df, palette="PRGn")
ax.set(xlabel='Day', ylabel='Accuracy')
plt.grid(linestyle='dotted')
plt.axis([0, 10, 0, 1])
# Group response-time boxplots.
plt.subplot(512)
result = pd.concat(group_rts.values())
result['Day'] = result.index
df = pd.melt(result, id_vars=['Day', 'sid', 'group'], value_vars='zscoredRT')
with warnings.catch_warnings():
    warnings.simplefilter("ignore", category=RuntimeWarning)
    ax = sns.boxplot(x='Day', y='value', hue='group', data=df, palette="PRGn")
ax.set(xlabel='Day', ylabel='RT (z-units)')
plt.legend(loc='upper left')
plt.grid(linestyle='dotted')
# Group skill boxplots (skill column computed in the per-subject section).
plt.subplot(513)
result = pd.concat(group_acc.values())
result['Day'] = result.index
df = pd.melt(result, id_vars=['Day', 'sid', 'group'], value_vars='skill')
with warnings.catch_warnings():
    warnings.simplefilter("ignore", category=RuntimeWarning)
    ax = sns.boxplot(x="Day", y="value", hue='group', data=df, palette="PRGn");
ax.set(xlabel='Day', ylabel='skill (a.u.)')
plt.grid(linestyle='dotted')
plt.legend(loc='upper left')
# Group-average autocorrelation for the 'response' group, one trace per day.
plt.subplot(514)
result = pd.concat(group_acfs.values())
result = result.loc[result['group'] == 'response']
result['day'] = result.index
lag_names = ['lag' + str(i) for i in range(1, 16)]
df = pd.melt(result, id_vars=["day", 'sid'], value_vars=lag_names)
# Strip the 'lag' prefix so the lag number can be coerced to numeric below
# (lstrip removes the leading 'l'/'a'/'g' characters, leaving the digits).
df['variable'] = df['variable'].map(lambda x: x.lstrip('lag').rstrip('aAbBcC'))
df = df.apply(pd.to_numeric, errors='coerce')
ax = sns.tsplot(time='variable', value='value', unit='sid', condition="day",
                data=df, interpolate=True, ci=68,
                color=sns.light_palette("green", n_colors=10))
ax.set(xlabel='Lag (Trial)', ylabel='Autocorrelation')
legend = ax.legend(loc='upper right', ncol=1, prop={'size': 8},
                   title='Training Day')
plt.setp(legend.get_title(), fontsize='xx-small')
plt.axis([1, 15, -0.25, .5])
plt.xticks(np.arange(1, 16, 1))
plt.yticks(np.arange(-.25, .51, 0.25))
plt.grid(linestyle='dotted')
# Same plot for the 'cue' group.
plt.subplot(515)
result = pd.concat(group_acfs.values())
result = result.loc[result['group'] == 'cue']
result['day'] = result.index
lag_names = ['lag' + str(i) for i in range(1, 16)]
df = pd.melt(result, id_vars=["day", 'sid'], value_vars=lag_names)
df['variable'] = df['variable'].map(lambda x: x.lstrip('lag').rstrip('aAbBcC'))
df = df.apply(pd.to_numeric, errors='coerce')
ax = sns.tsplot(time='variable', value='value', unit='sid', condition="day",
                data=df, interpolate=True, ci=68,
                color=sns.light_palette("purple", n_colors=10))
ax.set(xlabel='Lag (Trial)', ylabel='Autocorrelation')
legend = ax.legend(loc='upper right', ncol=1, prop={'size': 8},
                   title='Training Day')
plt.setp(legend.get_title(), fontsize='xx-small')
plt.axis([1, 15, -0.25, .5])
plt.xticks(np.arange(1, 16, 1))
plt.yticks(np.arange(-.25, .51, 0.25))
plt.grid(linestyle='dotted')
plt.tight_layout(pad=0.2, w_pad=0.2, h_pad=.5)
group_plot_fn = out_path + 'group_performance.svg'
plt.savefig(group_plot_fn)
plt.close('all')
# Email the updated group summary figures.
fromaddr = 'beuk.pat@gmail.com'
toaddrs = 'beuk.pat@gmail.com'
# NOTE(review): this string assignment is immediately overwritten by the
# MIMEMultipart below -- dead code.
msg = 'modmap_update'
msg = MIMEMultipart()
msg['Subject'] = 'modmap_update'
msg['From'] = 'beuk.pat@gmail.com'
msg['To'] = 'beuk.pat@gmail.com'
msg.preamble = ''
username = 'beuk.pat'
# NOTE(review): password is blank -- presumably filled in locally; the SMTP
# login below will fail as committed. Avoid committing credentials.
password = ''
# Attach the group summary figures.
for file in glob.glob(out_path + 'group*.svg'):
    fp = open(file, 'rb')
    img = MIMEImage(fp.read(), name=os.path.basename(file), _subtype="svg")
    fp.close()
    msg.attach(img)
# The actual mail send, over STARTTLS.
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.login(username, password)
server.sendmail(fromaddr, toaddrs, msg.as_string())
server.quit()
| {
"content_hash": "5f44f77020926f9287892ebbc836b968",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 154,
"avg_line_length": 39.671875,
"alnum_prop": 0.6301693580149665,
"repo_name": "pbeukema/rsaRemap",
"id": "144973b79edb395f059b0afd6c83f0e8d711e28c",
"size": "12695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/analysis/BehavioralDataProcessing/modmap_autopilot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186069"
}
],
"symlink_target": ""
} |
from .action import *
from .combat import *
from .control import *
from .entity import *
from .movement import *
from .skill import *
| {
"content_hash": "9b0d474c365d89d0d2bb3ba2a6e3a324",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 23,
"avg_line_length": 19.285714285714285,
"alnum_prop": 0.725925925925926,
"repo_name": "Kromey/roglick",
"id": "4e613b7d9241018dce27a4b30eb80c9ed88ef3ea",
"size": "135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roglick/events/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107063"
}
],
"symlink_target": ""
} |
import jsonschema
from rally.env import env_mgr
from rally.env import platform
from tests.unit import test
class PlatformBaseTestCase(test.TestCase):
    """Base class providing JSON-schema validation helpers for platform tests."""

    def _check_schema(self, schema, obj):
        # Raises jsonschema.ValidationError if obj does not match schema.
        jsonschema.validate(obj, schema)

    def _check_health_schema(self, obj):
        self._check_schema(env_mgr.EnvManager._HEALTH_FORMAT, obj)

    def _check_cleanup_schema(self, obj):
        self._check_schema(env_mgr.EnvManager._CLEANUP_FORMAT, obj)

    def _check_info_schema(self, obj):
        self._check_schema(env_mgr.EnvManager._INFO_FORMAT, obj)
class PlatformTestCase(test.TestCase):
    """Checks the contract of the Platform plugin base class."""

    def test_plugin_configure_and_methods(self):
        @platform.configure(name="existing", platform="foo")
        class FooPlugin(platform.Platform):
            pass

        self.addCleanup(FooPlugin.unregister)

        plugin = FooPlugin("spec", "uuid", "plugin_data", "platform_data",
                           "status")
        # Constructor arguments must land on the matching attributes.
        self.assertEqual("uuid", plugin.uuid)
        self.assertEqual("spec", plugin.spec)
        self.assertEqual("plugin_data", plugin.plugin_data)
        self.assertEqual("platform_data", plugin.platform_data)
        self.assertEqual("status", plugin.status)

        # Every lifecycle hook of the abstract base must raise until a
        # concrete platform implementation overrides it.
        for call in (plugin.create, plugin.destroy, plugin.cleanup,
                     plugin.check_health, plugin.info):
            self.assertRaises(NotImplementedError, call)
        self.assertRaises(NotImplementedError, plugin.update, "new_spec")
        self.assertRaises(NotImplementedError,
                          plugin.cleanup, task_uuid="task_uuid")
| {
"content_hash": "ef89bacb15e4d5464ee3737fe7fba2ca",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 33.625,
"alnum_prop": 0.677819083023544,
"repo_name": "openstack/rally",
"id": "0ca99a0bf7bb1f64bbde68cc1943762944ef3dd2",
"size": "2212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/env/test_platform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1245"
},
{
"name": "HTML",
"bytes": "70138"
},
{
"name": "JavaScript",
"bytes": "10234"
},
{
"name": "Mako",
"bytes": "1182"
},
{
"name": "Python",
"bytes": "2254050"
},
{
"name": "Shell",
"bytes": "6966"
}
],
"symlink_target": ""
} |
from pkgutil import walk_packages
import os
import os.path
def global_import(name):
    """Relatively import sibling module *name* and hoist its public names.

    Honors the module's ``__all__`` when present; otherwise exports
    everything ``dir()`` reports.
    """
    module = __import__(name, globals(), locals(), level=1)
    public_names = module.__all__ if hasattr(module, '__all__') else dir(module)
    for symbol in public_names:
        globals()[symbol] = module.__dict__[symbol]
# Eagerly import every non-private sibling module and hoist its public
# names into this package's namespace.
for _, module_name, _ in walk_packages(
        [os.path.dirname(__file__)]):
    if not module_name.startswith('_'):
        global_import(module_name)
| {
"content_hash": "e25135528dec07fed3896fc847b7806b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 54,
"avg_line_length": 27,
"alnum_prop": 0.5876543209876544,
"repo_name": "czhu95/ternarynet",
"id": "b5895a7d0fcee47cdd5cf85019a57f3a083c1b52",
"size": "493",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorpack/dataflow/dataset/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "293419"
}
],
"symlink_target": ""
} |
from glob import glob
from pathlib import Path
from pluginsmanager.observer.autosaver.persistence import Persistence
from pluginsmanager.util.persistence_decoder import PersistenceDecoder
class BanksFiles(object):
def __init__(self, data_path):
"""
:param Path data_path: Path that contains the banks
"""
self.data_path = data_path
def load(self, system_effect):
"""
Return a list if banks presents in data_path
:param SystemEffect system_effect: SystemEffect used in pedalboards
:return list[Bank]: List with Banks persisted in
:attr:`~pluginsmanager.observer.autosaver.banks_files.BanksFiles.data_path`
"""
persistence = PersistenceDecoder(system_effect)
banks = []
for file in glob(str(self.data_path) + "/*.json"):
bank = persistence.read(Persistence.read(file))
bank._uuid = file.split('/')[-1].split('.json')[0]
banks.append(bank)
return banks
def save(self, banks_manager):
for bank in banks_manager:
self.save_bank(bank)
def save_bank(self, bank):
"""
Save the bank in your file
:param Bank bank: Bank that will be persisted
"""
path = self._bank_path(bank)
Persistence.save(path, bank.json)
def delete_bank(self, bank):
"""
Delete the bank's file
:param Bank bank: Bank that will be removed
"""
path = self._bank_path(bank)
Persistence.delete(path)
def delete_all_banks(self):
"""
Delete all banks files.
Util for manual save, because isn't possible know which banks
were removed
"""
for file in glob(str(self.data_path) + "/*.json"):
Persistence.delete(file)
def _bank_path(self, bank):
"""
:param Bank bank: Bank that will be generate your path
:return string: Bank path .json
"""
return self.data_path / Path('{}.json'.format(bank.uuid))
| {
"content_hash": "77fce67509734cbf28eb211148b92ad5",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 91,
"avg_line_length": 28.328767123287673,
"alnum_prop": 0.5957446808510638,
"repo_name": "PedalPi/PluginsManager",
"id": "58745e7281b31c86920af2d3366f782243c36b92",
"size": "2645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pluginsmanager/observer/autosaver/banks_files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2251"
},
{
"name": "Python",
"bytes": "352748"
}
],
"symlink_target": ""
} |
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
__all__ = ["LinearOperatorFullMatrix"]
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
"""`LinearOperator` that wraps a [batch] matrix.
This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
```python
# Create a 2 x 2 linear operator.
matrix = [[1., 2.], [3., 4.]]
operator = LinearOperatorFullMatrix(matrix)
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.apply(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix = tf.random_normal(shape=[2, 3, 4, 4])
operator = LinearOperatorFullMatrix(matrix)
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `apply` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
`LinearOperatorFullMatrix` has exactly the same performance as would be
achieved by using standard `TensorFlow` matrix ops. Intelligent choices are
made based on the following initialization hints.
* If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a
Cholesky factorization is used for the determinant and solve.
In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape
`[M, N]`, and `x.shape = [N, R]`. Then
* `operator.apply(x)` is `O(M * N * R)`.
* If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
* If `M=N`, `operator.determinant()` is `O(N^3)`.
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite`.
These have the following meaning
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
name="LinearOperatorFullMatrix"):
r"""Initialize a `LinearOperatorFullMatrix`.
Args:
matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
Allowed dtypes: `float32`, `float64`, `complex64`, `complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
name: A name for this `LinearOperator`.
Raises:
TypeError: If `diag.dtype` is not an allowed type.
"""
with ops.name_scope(name, values=[matrix]):
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
self._check_matrix(self._matrix)
# Special treatment for (real) Symmetric Positive Definite.
self._is_spd = (
(not self._matrix.dtype.is_complex)
and is_self_adjoint and is_positive_definite)
if self._is_spd:
self._chol = linalg_ops.cholesky(self._matrix)
super(LinearOperatorFullMatrix, self).__init__(
dtype=self._matrix.dtype,
graph_parents=[self._matrix],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
name=name)
def _check_matrix(self, matrix):
"""Static check of the `matrix` argument."""
allowed_dtypes = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
matrix = ops.convert_to_tensor(matrix, name="matrix")
dtype = matrix.dtype
if dtype not in allowed_dtypes:
raise TypeError(
"Argument matrix must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if matrix.get_shape().ndims is not None and matrix.get_shape().ndims < 2:
raise ValueError(
"Argument matrix must have at least 2 dimensions. Found: %s"
% matrix)
def _shape(self):
return self._matrix.get_shape()
def _shape_tensor(self):
return array_ops.shape(self._matrix)
def _apply(self, x, adjoint=False, adjoint_arg=False):
return math_ops.matmul(
self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _determinant(self):
if self._is_spd:
return math_ops.exp(self.log_abs_determinant())
return linalg_ops.matrix_determinant(self._matrix)
def _log_abs_determinant(self):
if self._is_spd:
diag = array_ops.matrix_diag_part(self._chol)
return 2 * math_ops.reduce_sum(math_ops.log(diag), reduction_indices=[-1])
abs_det = math_ops.abs(self.determinant())
return math_ops.log(abs_det)
  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # Apply the adjoint of rhs eagerly if requested; the solve ops below
    # have no adjoint flag for the right-hand side.
    rhs = linear_operator_util.matrix_adjoint(rhs) if adjoint_arg else rhs
    if self._is_spd:
      # SPD implies self-adjoint (see __init__), so `adjoint` is a no-op here.
      return linalg_ops.cholesky_solve(self._chol, rhs)
    return linalg_ops.matrix_solve(self._matrix, rhs, adjoint=adjoint)
  def _to_dense(self):
    # Already backed by a dense matrix; return it as-is.
    return self._matrix
| {
"content_hash": "4a3b2bf808c9cd90cde691026789d00d",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 80,
"avg_line_length": 35.5945945945946,
"alnum_prop": 0.6522399392558846,
"repo_name": "LUTAN/tensorflow",
"id": "64ab561457789776003ce56038c47ca32dacffdd",
"size": "7274",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/linalg/python/ops/linear_operator_full_matrix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "182478"
},
{
"name": "C++",
"bytes": "23299205"
},
{
"name": "CMake",
"bytes": "158302"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "801882"
},
{
"name": "HTML",
"bytes": "595939"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "13994"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37302"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "213845"
},
{
"name": "Python",
"bytes": "20322070"
},
{
"name": "Shell",
"bytes": "334983"
},
{
"name": "TypeScript",
"bytes": "796600"
}
],
"symlink_target": ""
} |
""" This provides some useful code used by other modules. This is not to be
used by the end user which is why it is hidden. """
import string, sys
class LinkError(Exception):
    """Raised when an extension module exists but fails to load due to a
    linking problem (as opposed to the module simply being absent)."""
    pass
def refine_import_err(mod_name, extension_name, exc):
    """ Checks to see if the ImportError was because the library
    itself was not there or because there was a link error. If there
    was a link error it raises a LinkError if not it does nothing.
    Keyword arguments
    -----------------
    - mod_name : The name of the Python module that was imported.
    - extension_name : The name of the extension module that is to be
      imported by the module having mod_name.
    - exc : The exception raised when the module called mod_name was
      imported.
    To see example usage look at __init__.py.
    """
    # Drop the partially-imported module so a later import retries cleanly.
    try:
        del sys.modules['vtk.%s' % mod_name]
    except KeyError:
        pass
    # If the extension's name does not appear in the error text, the failure
    # was not a plain "module missing" error -- treat it as a link error.
    # Fix: use the str method instead of the deprecated `string` module, and
    # Python-3-compatible raise syntax (`raise X, y` is Python-2-only).
    if str(exc).find(extension_name) == -1:
        raise LinkError(str(exc))
| {
"content_hash": "a2cf4a623bfb517d59b0f5c1b3b0bca6",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 29.727272727272727,
"alnum_prop": 0.6636085626911316,
"repo_name": "Wuteyan/VTK",
"id": "7529c2f4906487813e8d22cce8a86189b760306d",
"size": "981",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Wrapping/Python/vtk/__helper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37780"
},
{
"name": "C",
"bytes": "29104737"
},
{
"name": "C++",
"bytes": "49239988"
},
{
"name": "CMake",
"bytes": "926259"
},
{
"name": "GLSL",
"bytes": "203156"
},
{
"name": "Groff",
"bytes": "48168"
},
{
"name": "HTML",
"bytes": "275802"
},
{
"name": "Java",
"bytes": "64945"
},
{
"name": "Lex",
"bytes": "18084"
},
{
"name": "Makefile",
"bytes": "444"
},
{
"name": "Objective-C",
"bytes": "8007"
},
{
"name": "Objective-C++",
"bytes": "92495"
},
{
"name": "Perl",
"bytes": "174107"
},
{
"name": "Python",
"bytes": "519850"
},
{
"name": "QMake",
"bytes": "340"
},
{
"name": "Shell",
"bytes": "2186"
},
{
"name": "Tcl",
"bytes": "1438489"
},
{
"name": "TeX",
"bytes": "123478"
},
{
"name": "Yacc",
"bytes": "96542"
}
],
"symlink_target": ""
} |
"""Provides Qt3DCore classes and functions."""
# Local imports
from . import PYQT5, PYQT6, PYSIDE2, PYSIDE6, PythonQtError
# Re-export Qt3DCore from whichever Qt binding was detected at import time.
if PYQT5:
    from PyQt5.Qt3DCore import *
elif PYQT6:
    from PyQt6.Qt3DCore import *
elif PYSIDE6:
    # PySide6 nests the classes one level deeper: PySide6.Qt3DCore.Qt3DCore.
    from PySide6.Qt3DCore.Qt3DCore import *
elif PYSIDE2:
    # https://bugreports.qt.io/projects/PYSIDE/issues/PYSIDE-1026
    # Wildcard import is broken under PySide2, so copy each member of the
    # nested submodule into this module's namespace by hand.
    import PySide2.Qt3DCore as __temp
    import inspect
    for __name in inspect.getmembers(__temp.Qt3DCore):
        globals()[__name[0]] = __name[1]
else:
    raise PythonQtError('No Qt bindings could be found')
| {
"content_hash": "5852b6eb7e2ef01a71cbdfffce2a16ca",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 29.842105263157894,
"alnum_prop": 0.7072310405643739,
"repo_name": "davvid/qtpy",
"id": "57f1ef1c65f68a2d993c1aa141f8775611668d21",
"size": "856",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qtpy/Qt3DCore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69789"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
"""Unified in-memory receipt model."""
from oslo_log import log
from oslo_serialization import msgpackutils
from oslo_utils import reflection
from keystone.auth import core
from keystone.common import cache
from keystone.common import provider_api
from keystone import exception
from keystone.identity.backends import resource_options as ro
LOG = log.getLogger(__name__)
PROVIDERS = provider_api.ProviderAPIs
class ReceiptModel(object):
    """An object that represents a receipt emitted by keystone.
    This is a queryable object that other parts of keystone can use to reason
    about a user's receipt.
    """
    def __init__(self):
        # NOTE: attribute names are significant -- _ReceiptModelHandler
        # round-trips this object through msgpack via ``obj.__dict__``.
        self.user_id = None
        self.__user = None
        self.__user_domain = None
        self.methods = None
        self.__required_methods = None
        self.__expires_at = None
        self.__issued_at = None
    def __repr__(self):
        """Return string representation of KeystoneReceipt."""
        desc = ('<%(type)s at %(loc)s>')
        self_cls_name = reflection.get_class_name(self, fully_qualified=False)
        return desc % {'type': self_cls_name, 'loc': hex(id(self))}
    @property
    def expires_at(self):
        return self.__expires_at
    @expires_at.setter
    def expires_at(self, value):
        # Timestamps are stored as strings, not datetimes; reject anything
        # else so serialization stays uniform.
        if not isinstance(value, str):
            raise ValueError('expires_at must be a string.')
        self.__expires_at = value
    @property
    def issued_at(self):
        return self.__issued_at
    @issued_at.setter
    def issued_at(self, value):
        if not isinstance(value, str):
            raise ValueError('issued_at must be a string.')
        self.__issued_at = value
    @property
    def user(self):
        # Lazily fetched from the identity API and memoized on the instance.
        if not self.__user:
            if self.user_id:
                self.__user = PROVIDERS.identity_api.get_user(self.user_id)
        return self.__user
    @property
    def user_domain(self):
        # Lazily fetched from the resource API via the user's domain_id.
        if not self.__user_domain:
            if self.user:
                self.__user_domain = PROVIDERS.resource_api.get_domain(
                    self.user['domain_id']
                )
        return self.__user_domain
    @property
    def required_methods(self):
        # Computed once: the user's MFA rules, keeping only rules that both
        # reference currently-enabled auth methods and intersect the methods
        # already used for this receipt.
        if not self.__required_methods:
            mfa_rules = self.user['options'].get(
                ro.MFA_RULES_OPT.option_name, [])
            rules = core.UserMFARulesValidator._parse_rule_structure(
                mfa_rules, self.user_id)
            methods = set(self.methods)
            active_methods = set(core.AUTH_METHODS.keys())
            required_auth_methods = []
            for r in rules:
                r_set = set(r).intersection(active_methods)
                if r_set.intersection(methods):
                    required_auth_methods.append(list(r_set))
            self.__required_methods = required_auth_methods
        return self.__required_methods
    def mint(self, receipt_id, issued_at):
        """Set the ``id`` and ``issued_at`` attributes of a receipt.
        The process of building a Receipt requires setting attributes about the
        partial authentication context, like ``user_id`` and ``methods`` for
        example. Once a Receipt object accurately represents this information
        it should be "minted". Receipt are minted when they get an ``id``
        attribute and their creation time is recorded.
        """
        self.id = receipt_id
        self.issued_at = issued_at
class _ReceiptModelHandler(object):
    """msgpack (de)serialization hooks for caching ReceiptModel objects."""
    identity = 125
    handles = (ReceiptModel,)
    def __init__(self, registry):
        self._registry = registry
    def serialize(self, obj):
        """Pack the model's attribute dict with msgpack."""
        return msgpackutils.dumps(obj.__dict__, registry=self._registry)
    def deserialize(self, data):
        """Rebuild a ReceiptModel from packed attribute data."""
        receipt_data = msgpackutils.loads(data, registry=self._registry)
        try:
            model = ReceiptModel()
            for attr_name, attr_value in receipt_data.items():
                setattr(model, attr_name, attr_value)
        except Exception:
            LOG.debug(
                "Failed to deserialize ReceiptModel. Data is %s", receipt_data
            )
            raise exception.CacheDeserializationError(
                ReceiptModel.__name__, receipt_data
            )
        return model
# Register the handler so the cache layer can (de)serialize ReceiptModel.
cache.register_model_handler(_ReceiptModelHandler)
| {
"content_hash": "87b6292a0608c7f5fcd670d91c05bc58",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 31.54014598540146,
"alnum_prop": 0.6068039805600556,
"repo_name": "openstack/keystone",
"id": "bb015b17271ed8792afe8e9945f09dad26560d2b",
"size": "4867",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/models/receipt_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "6213900"
},
{
"name": "Shell",
"bytes": "30491"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import numpy as np
import os
import PIL.Image
import PIL.ImageDraw
import digits
from digits.utils import subclass, override
from .forms import ConfigForm
from ..interface import VisualizationInterface
# Jinja template filenames, resolved relative to this extension's directory.
CONFIG_TEMPLATE = "config_template.html"
VIEW_TEMPLATE = "view_template.html"
@subclass
class Visualization(VisualizationInterface):
    """
    A visualization extension to display image gradient magnitude and direction
    """
    def __init__(self, dataset, **kwargs):
        """
        Init

        Parameters:
        - dataset: dataset object exposing get_feature_dims() -> (H, W, C)
        - kwargs: config form values ('arrow_color', 'arrow_size')

        Raises ValueError for an unrecognized arrow color.
        """
        # arrow config
        arrow_color = kwargs['arrow_color']
        if arrow_color == "red":
            self.color = (255, 0, 0)
        elif arrow_color == "green":
            self.color = (0, 255, 0)
        elif arrow_color == "blue":
            self.color = (0, 0, 255)
        else:
            raise ValueError("unknown color: %s" % arrow_color)
        self.arrow_size = float(kwargs['arrow_size'])
        # image dimensions (HWC)
        image_shape = dataset.get_feature_dims()
        self.height = image_shape[0]
        self.width = image_shape[1]
        # memorize view template for later use
        # Fix: close the template file instead of leaking the handle.
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(extension_dir, VIEW_TEMPLATE), "r") as fp:
            self.view_template = fp.read()
    @staticmethod
    def get_config_form():
        return ConfigForm()
    @staticmethod
    def get_config_template(form):
        """
        parameters:
        - form: form returned by get_config_form(). This may be populated
          with values if the job was cloned
        returns:
        - (template, context) tuple
        - template is a Jinja template to use for rendering config options
        - context is a dictionary of context variables to use for rendering
          the form
        """
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        # Fix: close the template file instead of leaking the handle.
        with open(os.path.join(extension_dir, CONFIG_TEMPLATE), "r") as fp:
            template = fp.read()
        context = {'form': form}
        return (template, context)
    @staticmethod
    def get_id():
        return digits.extensions.data.imageGradients.data.DataIngestion.get_id()
    @staticmethod
    def get_title():
        return digits.extensions.data.imageGradients.data.DataIngestion.get_title()
    @override
    def get_view_template(self, data):
        """
        parameters:
        - data: data returned by process_data()
        returns:
        - (template, context) tuple
        - template is a Jinja template to use for rendering config
          options
        - context is a dictionary of context variables to use for
          rendering the form
        """
        return self.view_template, {'gradients': data['gradients'], 'image': data['image']}
    @override
    def process_data(self, input_id, input_data, output_data):
        """
        Process one inference and return data to visualize
        """
        # assume only one output and grayscale input
        # Fix: dict.keys() is not indexable on Python 3; take the first key
        # via an iterator (works on both Python 2 and 3).
        output_vector = output_data[next(iter(output_data))]
        # Scale the gradient vector to image dimensions.
        grad = np.array(
            [output_vector[0] * self.width,
             output_vector[1] * self.height])
        grad_rotated_90 = np.array([-grad[1], grad[0]])
        center = np.array([self.width / 2, self.height / 2])
        arrow = grad * (self.arrow_size / 100.)
        arrow_tip = center + arrow/2
        arrow_tail = center - arrow/2
        # arrow tail (anticlockwise)
        at_acw = arrow_tail + 0.1 * grad_rotated_90
        # arrow tail (clockwise)
        at_cw = arrow_tail - 0.1 * grad_rotated_90
        # draw an oriented caret
        image = PIL.Image.fromarray(input_data).convert('RGB')
        draw = PIL.ImageDraw.Draw(image)
        draw.line(
            (at_acw[0], at_acw[1], arrow_tip[0], arrow_tip[1]),
            fill=self.color)
        draw.line(
            (at_cw[0], at_cw[1], arrow_tip[0], arrow_tip[1]),
            fill=self.color)
        draw.line(
            (at_acw[0], at_acw[1], at_cw[0], at_cw[1]),
            fill=self.color)
        image_html = digits.utils.image.embed_image_html(image)
        return {'image': image_html,
                'gradients': [output_vector[0], output_vector[1]]}
| {
"content_hash": "dc09519912454636ea3011055dee83d6",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 91,
"avg_line_length": 32.883720930232556,
"alnum_prop": 0.5874587458745875,
"repo_name": "jmancewicz/DIGITS",
"id": "32de6aab77459c7f98a6e887e1be3a4b1b830303",
"size": "4306",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "digits/extensions/view/imageGradients/view.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2557"
},
{
"name": "HTML",
"bytes": "224818"
},
{
"name": "JavaScript",
"bytes": "132563"
},
{
"name": "Lua",
"bytes": "12103"
},
{
"name": "Python",
"bytes": "727804"
},
{
"name": "Shell",
"bytes": "4225"
}
],
"symlink_target": ""
} |
"""Provides a common base for Apache proxies"""
import os
import shutil
import subprocess
import mock
import zope.interface
from certbot import configuration
from certbot import errors as le_errors
from certbot_apache import configurator
from certbot_apache import constants
from certbot_compatibility_test import errors
from certbot_compatibility_test import interfaces
from certbot_compatibility_test import util
from certbot_compatibility_test.configurators import common as configurators_common
@zope.interface.implementer(interfaces.IConfiguratorProxy)
class Proxy(configurators_common.Proxy):
    # pylint: disable=too-many-instance-attributes
    """A common base for Apache test configurators"""
    def __init__(self, args):
        """Initializes the plugin with the given command line args"""
        super(Proxy, self).__init__(args)
        self.le_config.apache_le_vhost_ext = "-le-ssl.conf"
        self.modules = self.server_root = self.test_conf = self.version = None
        # Stub out interactive vhost selection for the whole test run; the
        # patch is undone in cleanup_from_tests() via mock.patch.stopall().
        patch = mock.patch(
            "certbot_apache.configurator.display_ops.select_vhost")
        mock_display = patch.start()
        mock_display.side_effect = le_errors.PluginError(
            "Unable to determine vhost")
    def load_config(self):
        """Loads the next configuration for the plugin to test"""
        config = super(Proxy, self).load_config()
        self._all_names, self._test_names = _get_names(config)
        server_root = _get_server_root(config)
        # NOTE(review): destructively replaces the system Apache config --
        # this assumes a disposable test environment; confirm before reuse.
        shutil.rmtree("/etc/apache2")
        shutil.copytree(server_root, "/etc/apache2", symlinks=True)
        self._prepare_configurator()
        # NOTE(review): subprocess.check_call raises CalledProcessError on
        # failure; verify errors.Error actually covers that type, otherwise
        # this except clause never fires.
        try:
            subprocess.check_call("apachectl -k restart".split())
        except errors.Error:
            raise errors.Error(
                "Apache failed to load {0} before tests started".format(
                    config))
        return config
    def _prepare_configurator(self):
        """Prepares the Apache plugin for testing"""
        # Copy the Debian OS-constant defaults onto the CLI config namespace.
        for k in constants.CLI_DEFAULTS_DEBIAN.keys():
            setattr(self.le_config, "apache_" + k, constants.os_constant(k))
        # An alias
        self.le_config.apache_handle_modules = self.le_config.apache_handle_mods
        self._configurator = configurator.ApacheConfigurator(
            config=configuration.NamespaceConfig(self.le_config),
            name="apache")
        self._configurator.prepare()
    def cleanup_from_tests(self):
        """Performs any necessary cleanup from running plugin tests"""
        super(Proxy, self).cleanup_from_tests()
        mock.patch.stopall()
def _get_server_root(config):
"""Returns the server root directory in config"""
subdirs = [
name for name in os.listdir(config)
if os.path.isdir(os.path.join(config, name))]
if len(subdirs) != 1:
errors.Error("Malformed configuration directory {0}".format(config))
return os.path.join(config, subdirs[0].rstrip())
def _get_names(config):
    """Returns all and testable domain names in config"""
    # all_names: every discovered hostname; non_ip_names: those that are not
    # raw IP addresses (the testable subset).
    all_names = set()
    non_ip_names = set()
    with open(os.path.join(config, "vhosts")) as f:
        for line in f:
            # If parsing a specific vhost
            if line[0].isspace():
                words = line.split()
                if words[0] == "alias":
                    all_names.add(words[1])
                    non_ip_names.add(words[1])
                # If for port 80 and not IP vhost
                elif words[1] == "80" and not util.IP_REGEX.match(words[3]):
                    all_names.add(words[3])
                    non_ip_names.add(words[3])
            elif "NameVirtualHost" not in line:
                words = line.split()
                # NOTE(review): `and` binds tighter than `or`, so this accepts
                # any entry ending in "*" regardless of the IP/dot checks --
                # confirm this precedence is intentional.
                if (words[0].endswith("*") or words[0].endswith("80") and
                        not util.IP_REGEX.match(words[1]) and
                        words[1].find(".") != -1):
                    all_names.add(words[1])
    return all_names, non_ip_names
| {
"content_hash": "bfc58e68623187ea77f81fe735c67542",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 83,
"avg_line_length": 36.37614678899082,
"alnum_prop": 0.6189155107187894,
"repo_name": "jtl999/certbot",
"id": "64170ca728a02c38f184996acfae0bd0babf9f97",
"size": "3965",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "certbot-compatibility-test/certbot_compatibility_test/configurators/apache/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62302"
},
{
"name": "Augeas",
"bytes": "5245"
},
{
"name": "Batchfile",
"bytes": "35005"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37245"
},
{
"name": "Nginx",
"bytes": "118585"
},
{
"name": "Python",
"bytes": "1477643"
},
{
"name": "Shell",
"bytes": "176838"
},
{
"name": "Standard ML",
"bytes": "256"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the optional (nullable, blank-able) ``mp_id`` CharField to the
    # PaypalIPN model. Migrations are historical records: do not edit logic.
    dependencies = [
        ("ipn", "0001_initial"),
    ]
    operations = [
        migrations.AddField(
            model_name="paypalipn",
            name="mp_id",
            field=models.CharField(max_length=128, null=True, blank=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "9639739c1efc5986eba2653a0e2139fc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 22.41176470588235,
"alnum_prop": 0.5643044619422573,
"repo_name": "spookylukey/django-paypal",
"id": "b3a54dd752c480a93400fda956f39dba7f8b772c",
"size": "381",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "paypal/standard/ipn/migrations/0002_paypalipn_mp_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5282"
},
{
"name": "Python",
"bytes": "207294"
},
{
"name": "Shell",
"bytes": "326"
}
],
"symlink_target": ""
} |
from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
import os.path
class Material(object):
    """A named material: texture filename plus its GL texture id (if loaded)."""
    def __init__(self):
        # No texture is associated until read_mtllib / read_obj fill these in.
        self.name, self.texture_fname, self.texture_id = "", None, None
class FaceGroup(object):
    """A run of triangles sharing one material (one 'usemtl' section)."""
    def __init__(self):
        # Each entry of tri_indices is a (vertex, texcoord, normal) index triple.
        self.tri_indices, self.material_name = [], ""
class Model3D(object):
    """A textured triangle model loaded from a Wavefront .obj/.mtl pair."""
    def __init__(self):
        self.vertices = []       # (x, y, z) tuples
        self.tex_coords = []     # (s, t) tuples
        self.normals = []        # (x, y, z) tuples
        self.materials = {}      # material name -> Material
        self.face_groups = []    # one FaceGroup per 'usemtl' section
        # GL display list handle, created lazily by draw_quick()
        self.display_list_id = None
    def __del__(self):
        # Called when the model is cleaned up by Python
        self.free_resources()
    def free_resources(self):
        """Release GL objects (display list, textures) and clear geometry."""
        # Delete the display list and textures
        if self.display_list_id is not None:
            glDeleteLists(self.display_list_id, 1)
            self.display_list_id = None
        # Delete any textures we used
        for material in self.materials.values():
            if material.texture_id is not None:
                glDeleteTextures(material.texture_id)
        # Clear all the materials
        self.materials.clear()
        # Clear the geometry lists
        del self.vertices[:]
        del self.tex_coords[:]
        del self.normals[:]
        del self.face_groups[:]
    def read_obj(self, fname):
        """Parse an OBJ file (triangles only) and upload its textures to GL.

        Raises AssertionError if a face is not a triangle.
        """
        current_face_group = None
        # Fix: use a context manager so the file handle is always closed
        # (it was previously leaked).
        with open(fname) as file_in:
            for line in file_in:
                # Parse command and data from each line
                words = line.split()
                # Fix: skip blank and comment lines; a blank line previously
                # raised IndexError on words[0].
                if not words or words[0].startswith('#'):
                    continue
                command = words[0]
                data = words[1:]
                if command == 'mtllib':  # Material library
                    model_path = os.path.split(fname)[0]
                    mtllib_path = os.path.join(model_path, data[0])
                    self.read_mtllib(mtllib_path)
                elif command == 'v':  # Vertex
                    x, y, z = data
                    vertex = (float(x), float(y), float(z))
                    self.vertices.append(vertex)
                elif command == 'vt':  # Texture coordinate
                    s, t = data
                    tex_coord = (float(s), float(t))
                    self.tex_coords.append(tex_coord)
                elif command == 'vn':  # Normal
                    x, y, z = data
                    normal = (float(x), float(y), float(z))
                    self.normals.append(normal)
                elif command == 'usemtl':  # Use material
                    current_face_group = FaceGroup()
                    current_face_group.material_name = data[0]
                    self.face_groups.append(current_face_group)
                elif command == 'f':
                    assert len(data) == 3, "Sorry, only triangles are supported"
                    # Parse indices from triples (OBJ indices are 1-based)
                    for word in data:
                        vi, ti, ni = word.split('/')
                        indices = (int(vi) - 1, int(ti) - 1, int(ni) - 1)
                        current_face_group.tri_indices.append(indices)
        # Upload a GL texture for every material referenced by the model.
        for material in self.materials.values():
            model_path = os.path.split(fname)[0]
            texture_path = os.path.join(model_path, material.texture_fname)
            texture_surface = pygame.image.load(texture_path)
            texture_data = pygame.image.tostring(texture_surface, 'RGB', True)
            material.texture_id = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, material.texture_id)
            glTexParameteri(GL_TEXTURE_2D,
                            GL_TEXTURE_MAG_FILTER,
                            GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D,
                            GL_TEXTURE_MIN_FILTER,
                            GL_LINEAR_MIPMAP_LINEAR)
            glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
            width, height = texture_surface.get_rect().size
            gluBuild2DMipmaps(GL_TEXTURE_2D,
                              3,
                              width,
                              height,
                              GL_RGB,
                              GL_UNSIGNED_BYTE,
                              texture_data)
    def read_mtllib(self, mtl_fname):
        """Parse a .mtl file, registering a Material entry per 'newmtl'."""
        material = None
        # Fix: context manager closes the file; blank/comment lines skipped.
        with open(mtl_fname) as file_mtllib:
            for line in file_mtllib:
                words = line.split()
                if not words or words[0].startswith('#'):
                    continue
                command = words[0]
                data = words[1:]
                if command == 'newmtl':
                    material = Material()
                    material.name = data[0]
                    self.materials[data[0]] = material
                elif command == 'map_Kd':
                    material.texture_fname = data[0]
    def draw(self):
        """Immediate-mode render: one textured triangle batch per face group."""
        vertices = self.vertices
        tex_coords = self.tex_coords
        normals = self.normals
        for face_group in self.face_groups:
            material = self.materials[face_group.material_name]
            glBindTexture(GL_TEXTURE_2D, material.texture_id)
            glBegin(GL_TRIANGLES)
            for vi, ti, ni in face_group.tri_indices:
                glTexCoord2fv(tex_coords[ti])
                glNormal3fv(normals[ni])
                glVertex3fv(vertices[vi])
            glEnd()
    def draw_quick(self):
        """Render via a cached display list, compiling it on first use."""
        if self.display_list_id is None:
            self.display_list_id = glGenLists(1)
            glNewList(self.display_list_id, GL_COMPILE)
            self.draw()
            glEndList()
        glCallList(self.display_list_id)
| {
"content_hash": "41dc91465518123cb1d9ab7d6ab49be3",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 97,
"avg_line_length": 28.925925925925927,
"alnum_prop": 0.4947869032376075,
"repo_name": "PythonProgramming/Beginning-Game-Development-with-Python-and-Pygame",
"id": "096b98cf268bc094cfcb38cb51317361f02a764a",
"size": "5468",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Chapter 11/model3d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "598735"
}
],
"symlink_target": ""
} |
"""
Volume driver for NetApp NFS storage.
"""
import os
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
@interface.volumedriver
class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
    """NetApp NFS driver for Data ONTAP (7-mode)."""
    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "NetApp_CI"
    def __init__(self, *args, **kwargs):
        super(NetApp7modeNfsDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(na_opts.netapp_7mode_opts)
    def do_setup(self, context):
        """Do the customized set up on client if any for 7 mode."""
        super(NetApp7modeNfsDriver, self).do_setup(context)
        # Build the ZAPI client from the driver's NetApp connection options.
        self.zapi_client = client_7mode.Client(
            transport_type=self.configuration.netapp_transport_type,
            username=self.configuration.netapp_login,
            password=self.configuration.netapp_password,
            hostname=self.configuration.netapp_server_hostname,
            port=self.configuration.netapp_server_port,
            vfiler=self.configuration.netapp_vfiler)
        self.perf_library = perf_7mode.Performance7modeLibrary(
            self.zapi_client)
    def check_for_setup_error(self):
        """Checks if setup occurred properly."""
        api_version = self.zapi_client.get_ontapi_version()
        if api_version:
            major, minor = api_version
            # ONTAPI < 1.9 means Data ONTAP older than 7.3.1 (unsupported).
            if major == 1 and minor < 9:
                msg = _("Unsupported Data ONTAP version."
                        " Data ONTAP version 7.3.1 and above is supported.")
                raise exception.VolumeBackendAPIException(data=msg)
        else:
            msg = _("Data ONTAP API version could not be determined.")
            raise exception.VolumeBackendAPIException(data=msg)
        self._add_looping_tasks()
        super(NetApp7modeNfsDriver, self).check_for_setup_error()
    def _add_looping_tasks(self):
        """Add tasks that need to be executed at a fixed interval."""
        super(NetApp7modeNfsDriver, self)._add_looping_tasks()
    def _clone_backing_file_for_volume(self, volume_name, clone_name,
                                       volume_id, share=None,
                                       is_snapshot=False,
                                       source_snapshot=None):
        """Clone backing file for Cinder volume.
        :param: is_snapshot Not used, present for method signature consistency
        """
        (_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
        # Translate the NFS export path into the filer's on-disk path.
        storage_path = self.zapi_client.get_actual_path_for_export(export_path)
        target_path = '%s/%s' % (storage_path, clone_name)
        self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name),
                                    target_path, source_snapshot)
    def _update_volume_stats(self):
        """Retrieve stats info from vserver."""
        self._ensure_shares_mounted()
        LOG.debug('Updating volume stats')
        data = {}
        netapp_backend = 'NetApp_NFS_7mode_direct'
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or netapp_backend
        data['vendor_name'] = 'NetApp'
        data['driver_version'] = self.VERSION
        data['storage_protocol'] = 'nfs'
        data['pools'] = self._get_pool_stats(
            filter_function=self.get_filter_function(),
            goodness_function=self.get_goodness_function())
        data['sparse_copy_volume'] = True
        self._spawn_clean_cache_job()
        # Report usage back to the filer's EMS log.
        self.zapi_client.provide_ems(self, netapp_backend, self._app_version,
                                     server_type="7mode")
        self._stats = data
    def _get_pool_stats(self, filter_function=None, goodness_function=None):
        """Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
        pools = []
        self.perf_library.update_performance_cache()
        # Each mounted NFS share is reported as one scheduler pool.
        for nfs_share in self._mounted_shares:
            capacity = self._get_share_capacity_info(nfs_share)
            pool = dict()
            pool['pool_name'] = nfs_share
            pool['QoS_support'] = False
            pool['multiattach'] = True
            pool.update(capacity)
            # Sparse (thin) volumes and thick provisioning are exclusive here.
            thick = not self.configuration.nfs_sparsed_volumes
            pool['thick_provisioning_support'] = thick
            pool['thin_provisioning_support'] = not thick
            utilization = self.perf_library.get_node_utilization()
            pool['utilization'] = na_utils.round_down(utilization, '0.01')
            pool['filter_function'] = filter_function
            pool['goodness_function'] = goodness_function
            pool['consistencygroup_support'] = True
            pools.append(pool)
        return pools
    def _shortlist_del_eligible_files(self, share, old_files):
        """Prepares list of eligible files to be deleted from cache."""
        file_list = []
        (_, export_path) = self._get_export_ip_path(share=share)
        exported_volume = self.zapi_client.get_actual_path_for_export(
            export_path)
        # Pair each candidate with its on-filer usage in bytes.
        for old_file in old_files:
            path = os.path.join(exported_volume, old_file)
            u_bytes = self.zapi_client.get_file_usage(path)
            file_list.append((old_file, u_bytes))
        LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
        return file_list
    def _is_filer_ip(self, ip):
        """Checks whether ip is on the same filer."""
        # Best-effort: walk the filer's interface config looking for a
        # matching primary IPv4 address; any API failure reads as "no".
        try:
            ifconfig = self.zapi_client.get_ifconfig()
            if_info = ifconfig.get_child_by_name('interface-config-info')
            if if_info:
                ifs = if_info.get_children()
                for intf in ifs:
                    v4_addr = intf.get_child_by_name('v4-primary-address')
                    if v4_addr:
                        ip_info = v4_addr.get_child_by_name('ip-address-info')
                        if ip_info:
                            address = ip_info.get_child_content('address')
                            if ip == address:
                                return True
                            else:
                                continue
        except Exception:
            return False
        return False
    def _share_match_for_ip(self, ip, shares):
        """Returns the share that is served by ip.
        Multiple shares can have same dir path but
        can be served using different ips. It finds the
        share which is served by ip on same nfs server.
        """
        if self._is_filer_ip(ip) and shares:
            for share in shares:
                ip_sh = share.split(':')[0]
                if self._is_filer_ip(ip_sh):
                    LOG.debug('Share match found for ip %s', ip)
                    return share
        LOG.debug('No share match found for ip %s', ip)
        return None
    def _is_share_clone_compatible(self, volume, share):
        """Checks if share is compatible with volume to host its clone."""
        thin = self.configuration.nfs_sparsed_volumes
        return self._share_has_space_for_clone(share, volume['size'], thin)
    def _check_volume_type(self, volume, share, file_name, extra_specs):
        """Matches a volume type for share file."""
        # 7-mode has no file-level QoS; reject manage requests that need it.
        qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
            if extra_specs else None
        if qos_policy_group:
            raise exception.ManageExistingVolumeTypeMismatch(
                reason=(_("Setting file qos policy group is not supported"
                          " on this storage family and ontap version.")))
        volume_type = na_utils.get_volume_type_from_volume(volume)
        if volume_type and 'qos_spec_id' in volume_type:
            raise exception.ManageExistingVolumeTypeMismatch(
                reason=_("QoS specs are not supported"
                         " on this storage family and ONTAP version."))
    def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
        """Set QoS policy on backend from volume type information."""
        # 7-mode DOT does not support QoS.
        return
    def _get_volume_model_update(self, volume):
        """Provide any updates necessary for a volume being created/managed."""
    def _get_backing_flexvol_names(self):
        """Returns a list of backing flexvol names."""
        flexvol_names = []
        # The flexvol name is the last path component of each NFS share.
        for nfs_share in self._mounted_shares:
            flexvol_name = nfs_share.rsplit('/', 1)[1]
            flexvol_names.append(flexvol_name)
            LOG.debug("Found flexvol %s", flexvol_name)
        return flexvol_names
    def _get_flexvol_names_from_hosts(self, hosts):
        """Returns a set of flexvol names."""
        flexvols = set()
        for host in hosts:
            pool_name = volume_utils.extract_host(host, level='pool')
            flexvol_name = pool_name.rsplit('/', 1)[1]
            flexvols.add(flexvol_name)
        return flexvols
    @utils.trace_method
    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Delete files backing each snapshot in the cgsnapshot.
        :return: An implicit update of snapshot models that the manager will
                 interpret and subsequently set the model state to deleted.
        """
        for snapshot in snapshots:
            self._delete_file(snapshot['volume_id'], snapshot['name'])
            LOG.debug("Snapshot %s deletion successful", snapshot['name'])
        return None, None
| {
"content_hash": "544dbeb263a879b4dcb2d81a15f99e87",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 79,
"avg_line_length": 40.99591836734694,
"alnum_prop": 0.6012544802867383,
"repo_name": "NetApp/cinder",
"id": "15a74483027d2fa2250d662163f539eda4a5f1ed",
"size": "11019",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/netapp/dataontap/nfs_7mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17587090"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
} |
"""poster module
Support for streaming HTTP uploads, and multipart/form-data encoding
```poster.version``` is a 3-tuple of integers representing the version number.
New releases of poster will always have a version number that compares greater
than an older version of poster.
New in version 0.6."""
import vistrails.core.repository.poster.streaminghttp
import vistrails.core.repository.poster.encode
#version = (0, 7, 0) # Thanks JP!
| {
"content_hash": "ce589480eac2efd6ffb747ae9232cc81",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 78,
"avg_line_length": 36.5,
"alnum_prop": 0.7831050228310502,
"repo_name": "celiafish/VisTrails",
"id": "bd04291ed4ed97ca53f7483ddf0c5436b24cf8fc",
"size": "3413",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vistrails/core/repository/poster/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19611"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66415"
},
{
"name": "PHP",
"bytes": "49038"
},
{
"name": "Python",
"bytes": "19674395"
},
{
"name": "R",
"bytes": "778864"
},
{
"name": "Rebol",
"bytes": "3972"
},
{
"name": "Shell",
"bytes": "34182"
},
{
"name": "TeX",
"bytes": "145219"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
import pytest
from six.moves import reload_module
# HACK: if the plugin is imported before the coverage plugin then all
# the top-level code will be omitted from coverage, so force it to be
# reloaded within this unit test under coverage
import pytest_fixture_config
reload_module(pytest_fixture_config)
from pytest_fixture_config import Config, requires_config, yield_requires_config
# Minimal Config subclass for the tests below.
class DummyConfig(Config):
    # __slots__ declares the only valid config keys; Config.update()
    # raises ValueError for anything else (see test_config_update).
    __slots__ = ('foo', 'bar')
def test_config_update():
    """update() sets declared slots and rejects unknown keys."""
    config = DummyConfig(foo=1, bar=2)
    config.update({"foo": 10, "bar": 20})
    assert (config.foo, config.bar) == (10, 20)
    # A key outside __slots__ must be rejected.
    with pytest.raises(ValueError):
        config.update({"baz": 30})
# Shared config for the fixtures below: 'foo' is unset (None), 'bar' is set.
CONFIG1 = DummyConfig(foo=None, bar=1)
@pytest.fixture
@requires_config(CONFIG1, ('foo', 'bar'))
def a_fixture(request):
    # CONFIG1.foo is None, so requires_config skips any test using this
    # fixture; this body must never execute.
    raise ValueError('Should not run')
def test_requires_config_skips(a_fixture):
    # Skipped by a_fixture's requires_config guard before the body runs.
    raise ValueError('Should not run')
@pytest.fixture
@requires_config(CONFIG1, ('bar',))
def another_fixture(request):
    # CONFIG1.bar is set, so this fixture runs normally.
    return 'xxxx'
def test_requires_config_doesnt_skip(another_fixture):
    # Fixture requirement ('bar') is satisfied, so the test actually runs.
    assert another_fixture == 'xxxx'
@pytest.yield_fixture
@yield_requires_config(CONFIG1, ('foo', 'bar'))
def yet_another_fixture():
    # CONFIG1.foo is None, so the decorator skips before the body runs;
    # both the raise and the yield below are intentionally unreachable.
    raise ValueError('Should also not run')
    yield 'yyyy'
def test_yield_requires_config_skips(yet_another_fixture):
    # Skipped by the yield fixture's config guard before the body runs.
    raise ValueError('Should also not run')
@pytest.yield_fixture
@yield_requires_config(CONFIG1, ('bar',))
def yet_some_other_fixture():
    # CONFIG1.bar is set, so this yield fixture runs normally.
    yield 'yyyy'
def test_yield_requires_config_doesnt_skip(yet_some_other_fixture):
    # Fixture requirement ('bar') is satisfied, so the test actually runs.
    assert yet_some_other_fixture == 'yyyy'
| {
"content_hash": "fd2ea3da6048ab437cdf1bbfc1879866",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 80,
"avg_line_length": 24.33823529411765,
"alnum_prop": 0.6888217522658611,
"repo_name": "manahl/pytest-plugins",
"id": "4008bf401e89db5989479de80cae7131412fec79",
"size": "1655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytest-fixture-config/tests/unit/test_fixture_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1581"
},
{
"name": "Python",
"bytes": "227066"
},
{
"name": "Shell",
"bytes": "5947"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from mercurial.pycompat import getattr
from mercurial import (
context as hgcontext,
dagop,
extensions,
hgweb,
patch,
util,
)
from . import (
context,
revmap,
)
class _lazyfctx(object):
    """delegates to fctx but do not construct fctx when unnecessary"""
    def __init__(self, repo, node, path):
        # Store only the coordinates; the real fctx is resolved lazily.
        self._node = node
        self._path = path
        self._repo = repo
    def node(self):
        return self._node
    def path(self):
        return self._path
    @util.propertycache
    def _fctx(self):
        # Resolved on first access, then cached by util.propertycache.
        return context.resolvefctx(self._repo, self._node, self._path)
    def __getattr__(self, name):
        # Anything not defined above is forwarded to the real fctx,
        # constructing it on demand.
        return getattr(self._fctx, name)
def _convertoutputs(repo, annotated, contents):
    """convert fastannotate outputs to vanilla annotate format"""
    # fastannotate returns ([(nodeid, linenum, path)], [linecontent]);
    # vanilla fctx.annotate returns a list of dagop.annotateline objects.
    results = []
    fctxcache = {}
    for i, (hsh, linenum, path) in enumerate(annotated):
        key = (hsh, path)
        if key not in fctxcache:
            fctxcache[key] = _lazyfctx(repo, hsh, path)
        results.append(
            dagop.annotateline(
                fctx=fctxcache[key],
                # fastannotate line numbers are 0-based; callers want 1-based.
                lineno=linenum + 1,
                text=contents[i],
            )
        )
    return results
def _getmaster(fctx):
"""(fctx) -> str"""
return fctx._repo.ui.config(b'fastannotate', b'mainbranch') or b'default'
def _doannotate(fctx, follow=True, diffopts=None):
    """like the vanilla fctx.annotate, but do it via fastannotate, and make
    the output format compatible with the vanilla fctx.annotate.

    may raise Exception, and always return line numbers.
    """
    master = _getmaster(fctx)
    with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
        try:
            annotated, contents = ac.annotate(
                fctx.rev(), master=master, showpath=True, showlines=True
            )
        except Exception:
            # The cache may be corrupt; rebuild it once and retry.  A
            # failure of the retry simply propagates to the caller (the
            # former inner ``try/except Exception: raise`` was a no-op
            # and has been removed).
            ac.rebuild()
            fctx._repo.ui.debug(
                b'fastannotate: %s: rebuilding broken cache\n' % fctx._path
            )
            annotated, contents = ac.annotate(
                fctx.rev(), master=master, showpath=True, showlines=True
            )
    assert annotated and contents
    return _convertoutputs(fctx._repo, annotated, contents)
def _hgwebannotate(orig, fctx, ui):
    """Replacement for hgweb's annotate: always go through fastannotate."""
    annotateopts = patch.difffeatureopts(
        ui, untrusted=True, section=b'annotate', whitespace=True
    )
    return _doannotate(fctx, diffopts=annotateopts)
def _fctxannotate(
orig, self, follow=False, linenumber=False, skiprevs=None, diffopts=None
):
if skiprevs:
# skiprevs is not supported yet
return orig(
self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts
)
try:
return _doannotate(self, follow, diffopts)
except Exception as ex:
self._repo.ui.debug(
b'fastannotate: falling back to the vanilla annotate: %r\n' % ex
)
return orig(self, follow=follow, skiprevs=skiprevs, diffopts=diffopts)
def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
    """Wrap remotefilelog annotate, letting prefetch skip cached revisions."""
    # Revisions already present in the local revmap form a set-like used to
    # decide whether a fctx needs to be downloaded.
    with context.fctxannotatecontext(self, follow, diffopts) as ac:
        localrevs = revmap.revmap(ac.revmappath)
    return orig(
        self, follow, skiprevs=skiprevs, diffopts=diffopts,
        prefetchskip=localrevs
    )
def replacehgwebannotate():
    # Route hgweb's annotate through fastannotate (_hgwebannotate).
    extensions.wrapfunction(hgweb.webutil, b'annotate', _hgwebannotate)
def replacefctxannotate():
    # Route basefilectx.annotate through fastannotate (_fctxannotate).
    extensions.wrapfunction(hgcontext.basefilectx, b'annotate', _fctxannotate)
| {
"content_hash": "1df2c36cdd765da28063cc4ac585c939",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 80,
"avg_line_length": 30.153846153846153,
"alnum_prop": 0.6380102040816327,
"repo_name": "Maccimo/intellij-community",
"id": "5844c5d48d4db6126f941df2f6ded695b7a0bbd2",
"size": "4171",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "plugins/hg4idea/testData/bin/hgext/fastannotate/support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import requests
import os
from hautomation_restclient import RestApiException
from simplejson import JSONDecodeError
# REST endpoint templates, joined onto the server base URL by each call.
DEVICE_URL = "rest/manage/device/"
DEVICE_BY_PROTOCOL_URL = "rest/manage/device?protocol={protocol}"
DEVICE_BY_ID_URL = "rest/manage/device/{protocol}/{did}"
PROTOCOL_URL = "rest/manage/protocol/"
def get_protocols(server_url, username, password):
    """Return the names of all protocols known to the server.

    Raises RestApiException on any non-200 response.
    """
    url = os.path.join(server_url, PROTOCOL_URL)
    auth_headers = {"USERNAME": username, "PASSWORD": password}
    r = requests.get(url, headers=auth_headers, allow_redirects=False)
    if r.status_code != 200:
        # Prefer the decoded JSON body in the exception, raw text otherwise.
        try:
            raise RestApiException(r.json(), r.status_code)
        except JSONDecodeError:
            raise RestApiException(r.text, r.status_code)
    return [protocol["name"] for protocol in r.json()]
def add_device(protocol, did, caption, device_type, server_url, username, password):
    """Create a device on the server.

    Returns True on success (a 302 redirect); raises RestApiException
    otherwise.
    """
    url = os.path.join(server_url, DEVICE_URL)
    payload = {
        "protocol": protocol,
        "did": did,
        "device_type": device_type,
        "caption": caption,
    }
    auth_headers = {"USERNAME": username, "PASSWORD": password}
    r = requests.post(url, data=payload, headers=auth_headers,
                      allow_redirects=False)
    if r.status_code != 302:
        # Prefer the decoded JSON body in the exception, raw text otherwise.
        try:
            raise RestApiException(r.json(), r.status_code)
        except JSONDecodeError:
            raise RestApiException(r.text, r.status_code)
    return True
def del_device(protocol, did, server_url, username, password):
    """Delete a device.

    Returns True on success (a 204 response); raises RestApiException
    otherwise.
    """
    url = os.path.join(
        server_url, DEVICE_BY_ID_URL.format(protocol=protocol, did=did))
    auth_headers = {"USERNAME": username, "PASSWORD": password}
    r = requests.delete(url, headers=auth_headers)
    if r.status_code != 204:
        # Prefer the decoded JSON body in the exception, raw text otherwise.
        try:
            raise RestApiException(r.json(), r.status_code)
        except JSONDecodeError:
            raise RestApiException(r.text, r.status_code)
    return True
def get_device(protocol, did, server_url, username, password):
    """Fetch one device as decoded JSON.

    Returns [] when the server answers 404; raises RestApiException on
    any other non-200 response.
    """
    url = os.path.join(
        server_url, DEVICE_BY_ID_URL.format(protocol=protocol, did=did))
    auth_headers = {"USERNAME": username, "PASSWORD": password}
    r = requests.get(url, headers=auth_headers)
    if r.status_code == 404:
        return []
    if r.status_code != 200:
        # Prefer the decoded JSON body in the exception, raw text otherwise.
        try:
            raise RestApiException(r.json(), r.status_code)
        except JSONDecodeError:
            raise RestApiException(r.text, r.status_code)
    return r.json()
def upd_device(protocol, did, server_url, changes, username, password):
    """Apply ``changes`` to an existing device.

    Returns True on success (a 302 redirect); raises RestApiException
    otherwise.
    """
    url = os.path.join(
        server_url, DEVICE_BY_ID_URL.format(protocol=protocol, did=did))
    auth_headers = {"USERNAME": username, "PASSWORD": password}
    r = requests.put(url, headers=auth_headers, data=changes,
                     allow_redirects=False)
    if r.status_code != 302:
        # Prefer the decoded JSON body in the exception, raw text otherwise.
        try:
            raise RestApiException(r.json(), r.status_code)
        except JSONDecodeError:
            raise RestApiException(r.text, r.status_code)
    return True
def list_devices(protocol, server_url, username, password):
    """List all devices registered for ``protocol`` as decoded JSON.

    Returns [] when the server answers 404; raises RestApiException on
    any other non-200 response.
    """
    url = os.path.join(
        server_url, DEVICE_BY_PROTOCOL_URL.format(protocol=protocol))
    auth_headers = {"USERNAME": username, "PASSWORD": password}
    r = requests.get(url, headers=auth_headers)
    if r.status_code == 404:
        return []
    if r.status_code != 200:
        # Prefer the decoded JSON body in the exception, raw text otherwise.
        try:
            raise RestApiException(r.json(), r.status_code)
        except JSONDecodeError:
            raise RestApiException(r.text, r.status_code)
    return r.json()
| {
"content_hash": "6c4299b024c1b6c059adbd04d7a1bab1",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 116,
"avg_line_length": 35.82608695652174,
"alnum_prop": 0.6474514563106796,
"repo_name": "jpardobl/hautomation_restclient",
"id": "6049e2e14274489fbc60969d764e3a477ff60dc1",
"size": "3296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hautomation_restclient/manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10484"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.