| repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, lengths 0-8.16k) | middle (string, lengths 3-512) | suffix (string, lengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
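Each row below splits one Python source file into prefix, middle, and suffix for fill-in-the-middle training, alongside the repository, path, license, size, and score metadata described above. Purely as an illustration, the hedged sketch below shows how such a row could be reassembled for inspection; the row dict is a hand-built stand-in (values abridged from the first row), not the dataset's actual loading API.

```python
# Illustrative sketch only: reassemble a fill-in-the-middle row into source text.
# The dict below is a hand-built stand-in with values abridged from the first row;
# it is not an actual record object from whatever loader produced this dump.
def reassemble(row):
    """Concatenate the prefix/middle/suffix split back into one string."""
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "repo_name": "jeremiah-c-leary/vhdl-style-guide",
    "path": "vsg/vhdlFile/classify/integer_type_definition.py",
    "language": "Python",
    "license": "gpl-3.0",
    "size": 211,
    "score": 0.0,
    "prefix": "from vsg.vhdlFile.clas",
    "middle": "sify import range_constraint\n",  # abridged; the real middle is longer
    "suffix": "jects)",                           # abridged tail of the file
}

print(reassemble(example_row))
```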
jeremiah-c-leary/vhdl-style-guide
|
vsg/vhdlFile/classify/integer_type_definition.py
|
Python
|
gpl-3.0
| 211
| 0
|
from vsg.vhdlFile.clas
|
sify import range_constraint
def detect(iToken, lObjects):
'''
integer_type_definition ::=
range_constraint
'''
return range_constraint.detect(iToken, lOb
|
jects)
|
sean797/tracer
|
tracer/__init__.py
|
Python
|
gpl-2.0
| 273
| 0.018315
|
from __futur
|
e__ import absolute_import
from tracer.query import Query
from tracer.resources.package import Package
from tracer.resources.applications import Application
from tracer.resources.proce
|
sses import Process
__all__ = [
Query,
Package,
Application,
Process,
]
|
caot/intellij-community
|
python/testData/keywordCompletion/exceptNotIndented.py
|
Python
|
apache-2.0
| 26
| 0.115385
|
try:
|
a = 1
exce<caret
|
>
|
goanpeca/mongokit
|
mongokit/operators.py
|
Python
|
bsd-3-clause
| 3,264
| 0.000306
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2010, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Univers
|
ity of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific
|
prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class SchemaOperator(object):
repr = None
def __init__(self, *args):
assert self.repr is not None
self._operands = list(args)
def __repr__(self):
return str(self)
def __iter__(self):
for operand in self._operands:
yield operand
def __eq__(self, other):
return type(self) == type(other) and self._operands == other._operands
def validate(self, value):
raise NotImplementedError
class OR(SchemaOperator):
repr = 'or'
def __init__(self, *args):
super(OR, self).__init__(*args)
def __str__(self):
repr = ' %s ' % self.repr
return '<'+repr.join([i.__name__ for i in self._operands]) + '>'
def validate(self, value):
return isinstance(value, tuple(self._operands))
class NOT(SchemaOperator):
repr = 'not'
def __init__(self, *args):
super(NOT, self).__init__(*args)
def __str__(self):
repr = ', %s ' % self.repr
return '<not '+repr.join([i.__name__ for i in self._operands]) + '>'
def validate(self, value):
return not isinstance(value, tuple(self._operands))
class IS(SchemaOperator):
repr = 'is'
def __init__(self, *args):
super(IS, self).__init__(*args)
def __str__(self):
representation = ' or %s ' % self.repr
return '<is '+representation.join([repr(i) for i in self._operands]) + '>'
def validate(self, value):
if value in self._operands:
for op in self._operands:
if value == op and isinstance(value, type(op)):
return True
return False
|
qliu/globe_nocturne
|
globenocturne/globenocturneapp/models.py
|
Python
|
gpl-2.0
| 10,974
| 0.013851
|
from django.contrib.gis.db import models
class SatYear(models.Model):
year = models.IntegerField(primary_key=True)
def __unicode__(self):
return str(self.year)
class Meta:
verbose_name = 'Year'
db_table = u'sat_year'
class Satellite(models.Model):
# id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=10)
def __unicode__(self):
return self.name
|
class Meta:
verbose_name = 'Satellite'
db_table = u'satellite'
class DMSPProduct(models.Model):
# id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Meta:
verbose_name = 'DMSP Product'
db_table = u'dmsp_product'
class DMSPDataset(models.Model):
# id = models.IntegerField(primary_key=True)
name = mod
|
els.CharField(max_length=100,null='True',blank='True')
year = models.ForeignKey('SatYear',verbose_name='Year')
satellite = models.ForeignKey('Satellite',verbose_name='Satellite')
product = models.ForeignKey('DMSPProduct',verbose_name='DMSP Product')
wms_layer = models.CharField(max_length=100,verbose_name='WMS Layer')
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
# auto generate name based on selection
self.name = "%s-%s-%s" % (str(self.year),self.satellite.name,self.product.name)
super(DMSPDataset, self).save(*args, **kwargs)
def previous(self):
try:
previous_records = DMSPDataset.objects.filter(id__lt=self.id)
previous_id = previous_records.order_by('-id')[0].id
return DMSPDataset.objects.get(id=previous_id)
except:
return None
def next(self):
try:
next_records = DMSPDataset.objects.filter(id__gt=self.id)
next_id = next_records.order_by('id')[0].id
return DMSPDataset.objects.get(id=next_id)
except:
return None
class Meta:
verbose_name = 'DMSP Dataset'
db_table = u'dmsp_dataset'
class WorldBorder(models.Model):
# gid = models.IntegerField(primary_key=True)
iso = models.CharField(max_length=2)
country = models.CharField(max_length=50)
countryaff = models.CharField(max_length=50)
affiso = models.CharField(max_length=2)
geom = models.MultiPolygonField(srid=3857)
objects = models.GeoManager()
def __str__(self):
return self.country
class Meta:
verbose_name = 'World Border'
db_table = u'worldborder'
class WorldCountry(models.Model):
# id = models.IntegerField(primary_key=True)
fips = models.CharField(max_length=2,null=True,blank=True)
iso = models.CharField(max_length=2,null=True,blank=True)
iso3digit = models.CharField(max_length=3,null=True,blank=True)
name = models.CharField(max_length=250,null=True,blank=True)
capital = models.CharField(max_length=250,null=True,blank=True)
continent = models.CharField(max_length=250,null=True,blank=True)
# area_km = models.FloatField(null=True,blank=True)
def __unicode__(self):
return self.fips
def previous(self):
try:
previous_records = WorldCountry.objects.filter(id__lt=self.id)
previous_id = previous_records.order_by('-id')[0].id
return WorldCountry.objects.get(id=previous_id)
except:
return None
def next(self):
try:
next_records = WorldCountry.objects.filter(id__gt=self.id)
next_id = next_records.order_by('id')[0].id
return WorldCountry.objects.get(id=next_id)
except:
return None
class Meta:
verbose_name = 'World Country'
db_table = u'world_countries'
class WorldPoulation(models.Model):
# id = models.IntegerField(primary_key=True)
country = models.ForeignKey('WorldCountry')
year = models.IntegerField(null=True,blank=True)
value = models.FloatField(null=True,blank=True)
def previous(self):
try:
previous_records = WorldPoulation.objects.filter(id__lt=self.id)
previous_id = previous_records.order_by('-id')[0].id
return WorldPoulation.objects.get(id=previous_id)
except:
return None
def next(self):
try:
next_records = WorldPoulation.objects.filter(id__gt=self.id)
next_id = next_records.order_by('id')[0].id
return WorldPoulation.objects.get(id=next_id)
except:
return None
class Meta:
verbose_name = 'World Poulation'
db_table = u'world_population'
class WorldGDP(models.Model):
# id = models.IntegerField(primary_key=True)
country = models.ForeignKey('WorldCountry')
year = models.IntegerField(null=True,blank=True)
value = models.FloatField(null=True,blank=True)
def __unicode__(self):
return str(self.value)
def previous(self):
try:
previous_records = WorldGDP.objects.filter(id__lt=self.id)
previous_id = previous_records.order_by('-id')[0].id
return WorldGDP.objects.get(id=previous_id)
except:
return None
def next(self):
try:
next_records = WorldGDP.objects.filter(id__gt=self.id)
next_id = next_records.order_by('id')[0].id
return WorldGDP.objects.get(id=next_id)
except:
return None
class Meta:
verbose_name = 'World GDP'
db_table = u'world_gdp'
class WorldSOL(models.Model):
# id = models.IntegerField(primary_key=True)
country = models.ForeignKey('WorldCountry')
year = models.IntegerField(null=True,blank=True)
value = models.FloatField(null=True,blank=True)
def __unicode__(self):
return str(self.value)
def previous(self):
try:
previous_records = WorldSOL.objects.filter(id__lt=self.id)
previous_id = previous_records.order_by('-id')[0].id
return WorldSOL.objects.get(id=previous_id)
except:
return None
def next(self):
try:
next_records = WorldSOL.objects.filter(id__gt=self.id)
next_id = next_records.order_by('id')[0].id
return WorldSOL.objects.get(id=next_id)
except:
return None
class Meta:
verbose_name = 'World Sum of Lights'
db_table = u'world_sol'
class WorldOriginalSOL(models.Model):
# id = models.IntegerField(primary_key=True)
country = models.ForeignKey('WorldCountry')
year = models.IntegerField(null=True,blank=True)
sat = models.CharField(max_length=3)
sol = models.FloatField(null=True,blank=True)
dn_range_min = models.FloatField(null=True,blank=True)
dn_range_max = models.FloatField(null=True,blank=True)
pixels_in_polygon = models.IntegerField(null=True,blank=True)
pixels_in_range = models.IntegerField(null=True,blank=True)
pixels_zero = models.IntegerField(null=True,blank=True)
dn_min = models.FloatField(null=True,blank=True)
dn_max = models.FloatField(null=True,blank=True)
avg = models.FloatField(null=True,blank=True)
def __unicode__(self):
return "%s-%s-%s" % (self.country,self.sat,self.year)
def previous(self):
try:
previous_records = WorldOriginalSOL.objects.filter(id__lt=self.id)
previous_id = previous_records.order_by('-id')[0].id
return WorldOriginalSOL.objects.get(id=previous_id)
except:
return None
def next(self):
try:
next_records = WorldOriginalSOL.objects.filter(id__gt=self.id)
next_id = next_records.order_by('id')[0].id
return WorldOriginalSOL.objects.get(id=next_id)
except:
return None
class Meta:
verbose_name = 'World Original Su
|
nave91/teak-nbtree
|
src/project.py
|
Python
|
gpl-2.0
| 1,490
| 0.043624
|
from reader import *
from di
|
st import *
from sys import *
from table import *
def project(z,data):
d = anyi(data[z])
if d == len(data[z]):
d-=1
x = [0]*len(data[z])
y = [0]*len(data[z])
east = furthest(d,data,z)
west = furthest(data[z].index(east),data,z)
inde = data[z].index(east)
indw = data[z].index(west)
project0(inde,indw,data,z,x,y,count)
return widen(z,x,y,more,less)
def project0(east,west,data,z,x,y,count):
prin
|
t "+"
bigger = 1.05
some = 0.000001
c = dist(data[z][east],data[z][west],data,z,indep,nump)
for d in data[z]:
ind = data[z].index(d)
a = dist(data[z][ind],data[z][east],data,z,indep,nump)
b = dist(data[z][ind],data[z][west],data,z,indep,nump)
if a > c*bigger:
return project0(east,ind,data,z,x,y,count)
if b > c*bigger:
return project0(ind,west,data,z,x,y,count)
#print "."
x[ind] = (a**2 + c**2 - b**2) / (2*c + some)
y[ind] = (a**2 - x[ind]**2)**0.5
def widen(z,x,y,more,less):
adds = []
adds.extend(colname[z])
adds.extend(["$_XX"])
adds.extend(["$_yy"])
adds.extend(["_ZZ"])
w = "__"+z
makeTable(adds,w)
for d in data[z]:
ind = data[z].index(d)
hell = fromHell(data[z][ind],z,more,less)
wider = data[z][ind]
wider.extend([x[ind]])
wider.extend([y[ind]])
wider.extend([0])
addRow(wider,w)
return w
|
navdeepghai/bcommerce
|
bcommerce/utils/products.py
|
Python
|
mit
| 13,513
| 0.038111
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
'''
Developer Navdeep Ghai
Email navdeep@korecent.com
License Korecent Solution Pvt. Ltd.
'''
import frappe
from frappe import _, msgprint, throw
from bcommerce.utils.api import get_connection, is_exists
from erpnext.controllers.item_variant import create_variant
from bcommerce.utils.logger import make_logs
from bcommerce.exceptions import BcommerceProductError, CustomerDoesNotExist, OptionSetDoesNotExist
from frappe import ValidationError
from bcommerce.utils import get_resource, validate_resource
from bcommerce.utils.store import get_brand, sync_bulk_brands
'''
Sync a product if it does not already exist in the local ERPNext system.
This function helps sync a product while creating an order,
if the product does not exist in the local ERPNext system.
'''
def sync_product(id, setting):
flag = frappe.db.get_value("Item", {"bcommerce_product_id":id}, as_dict=1)
if flag:
return flag.get("name")
else:
sync_options()
sync_bulk_brands()
conn = get_connection()
product = get_resource("Products", id)
if not product:
return
save_product(product, setting, conn)
'''
This is for Order Products
'''
def has_variants(product):
return True if len(product.product_options) >= 1 else False
'''
Traverse bulk synced products
'''
def sync_bulk_products(products, setting, conn):
sync_options()
sync_bulk_brands()
temp = None
for product in products:
temp = product
if not frappe.db.get_value("Item", {"bcommerce_product_id": product.id}):
try:
save_product(product, setting, conn)
except:
msg = _("Error while saving Product {0}, Frappe traceback {1}".\
format(temp.id, frappe.get_traceback()))
make_logs("Failed", "Product", msg, temp.id)
'''
Entry point for create items/variants
'''
def save_product(product, setting, conn):
if product.option_set_id:
save_variants(product, setting, conn)
else:
create_item(product,setting, False, None, conn)
'''
Parse all the product variants, as these variants
refer to options on the BigCommerce system
'''
def save_variants(product, setting, conn, update=False):
attributes = get_item_attributes(product)
create_item(product, setting, True, attributes, conn, update)
'''
Insert Item entry in database
'''
def create_item(product, setting, has_variants=False, attributes=None, conn=None, update=False):
brand = get_brand(product.brand_id)
if has_variants and not attributes:
message = _("There is problem with Product {0}, traceback: {1}".format(product.name,
"Item has no attribute"))
make_logs("Failed", "Product", message, product.id, False)
return
doc = None
filters = {}
if has_variants:
filters["has_variants"] = 1
filters["bcommerce_product_id"] = product.id
product_name = frappe.db.get_value("Item", filters, as_dict=True)
if not product_name:
image = get_image_url(product)
doc = frappe.get_doc({
"doctype": "Item",
"uom": _("Nos"),
"title": product.page_title,
"item_code": product.name,
"is_stock_item":1,
"image": image,
"stock_keeping_unit":product.sku,
"height": product.height,
"net_weight": product.weight,
"width": product.width,
"depth":product.depth,
"bcommerce_product_id": product.id,
"sync_with_bcommerce":1,
"default_warehouse": setting.warehouse,
"item_group": setting.item_group,
"has_variants": has_variants,
"attributes": attributes,
"brand": brand,
"description": product.description,
})
doc.save(ignore_permissions=True)
else:
doc = frappe.get_doc("Item", product_name.get("name"))
image = get_image_url(product)
doc.update({
"title":product.page_title,
"attributes":attributes,
"brand": brand,
"image":image,
"stock_keeping_unit":product.sku,
"depth":product.depth,
"width": product.width,
"height": product.height,
"net_weight": product.weight,
"item_group":setting.item_group,
"default_warehouse":setting.warehouse,
"description":product.description
})
doc.save(ignore_permissions=True)
if has_variants:
create_variants(attributes, doc, product, setting)
else:
create_item_price(product, doc, setting)
'''
Create Variant function to traverse all the sub opt
|
ions in
OptionSet and then create the Item Attribute
'''
def create_variants(attribute, template, product, setting):
attr_name = attrib
|
ute[0].get("attribute") if len(attribute) >= 1 else None
if not attr_name:
return
options = frappe.db.get_values("Item Attribute Value", filters={"parent": attr_name},
fieldname=["attribute_value", "abbr"], as_dict=True)
for opt in options:
args = {}
item_code = "{0}-{1}".format(template.item_code, opt.get("abbr"))
if not frappe.db.get_value("Item", item_code):
args[attr_name] = opt.get("attribute_value")
doc = create_variant(template.item_code, args)
doc.save(ignore_permissions=True)
create_item_price(product, doc, setting)
else:
doc = frappe.get_doc("Item", item_code)
item_name = "{0}-{1}".format(product.name, opt.get("abbr"))
doc.update({
"item_name": item_name
})
doc.save(ignore_permissions=True)
create_item_price(product, doc, setting)
'''
Parse the Product Options as Item attributes in ERPNext
Then save the item attributes and return the attributes
Which will further link to Item Master table to create Variants
'''
def get_item_attributes(product):
flag = frappe.db.get_value("Item Attribute", filters={"bcommerce_optionset_id":product.option_set_id}, as_dict=1)
if flag:
return [frappe.get_doc({
"doctype": "Item Variant Attribute",
"attribute": flag.get("name")
})]
else:
get_optionset(id=product.option_set_id)
flag = frappe.db.get_value("Item Attribute", filters={"bcommerce_optionset_id":product.option_set_id}, as_dict=1)
if flag:
return [frappe.get_doc({
"doctype": "Item Variant Attribute",
"attribute": flag.get("name")
})]
'''
Get standard images URL from bigcommerce and map it to Item
'''
def get_image_url(product):
url = ""
images = product.images()
if isinstance(images, list):
if len(images) >= 1:
image = images[0]
url = image.standard_url
return url
'''
Buying/Selling Item Prices
'''
def create_item_price(product, item, setting):
item_code, item_name = item.item_code, item.item_name
create_price(item_code, item_name, product.price, setting.selling_price_list, "selling")
create_price(item_code, item_name, product.cost_price, setting.buying_price_list, "buying")
'''
Set Item price for item
'''
def create_price(item_code, item_name, item_price, price_list, price_type):
flag =frappe.db.get_value("Item Price", filters={"item_code":item_code, "price_list":price_list}, as_dict=True)
if not flag:
frappe.get_doc({
"doctype": "Item Price",
"item_name": item_name,
"item_code": item_code,
"price_list": price_list,
price_type:1,
"price_list_rate":item_price
}).save(ignore_permissions=True)
else:
doc = frappe.get_doc("Item Price", flag.get("name"))
doc.update({
"price_list_rate": item_price
}).save(ignore_permissions=True)
'''
Get OptionSet
'''
def get_optionset(id):
try:
resource = get_resource("OptionSets", id)
options_values = get_options_values(resource)
create_item_attribute(resource, options_values)
except Exception as e:
msg = "{0} {1} {2}".format("OptionSet with id ", id, "Doesn't exist")
make_logs("Failed", "OptionSet", message=msg)
'''
Get OptionSets Options Value
'''
def get_options_values(optionset):
options_values = frappe._dict()
for option in optionset.options():
if option and (isinstance(option, dict)):
flag = frappe.db.get_value("Bcommerce Option", {"option_id":option.option_id}, as_dict=True)
if not flag:
sync_options(id=option.option_id)
flag = frappe.db.get_value("Bcommerce Option", {"option_id":option.option_id}, as_dict=True)
if not flag:
msg = "{0} {1} {2}".format("Option with id ", option.option_id, " Does not exists")
make_logs("Failed", "Option", message=msg)
continue
values = frappe.db.get_values("Bcommerce Option Value", {"parent":flag.get("name")},
"bcommerce_op
|
sebrandon1/tempest
|
tempest/api/volume/admin/test_qos.py
|
Python
|
apache-2.0
| 6,582
| 0
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common.utils import data_utils as utils
from tempest.common import waiters
from tempest import test
class QosSpecsV2TestJSON(base.BaseVolumeAdminTest):
"""Test the Cinder QoS-specs.
Tests for create, list, delete, show, associate,
disassociate, set/unset key V2 APIs.
"""
@classmethod
def resource_setup(cls):
super(QosSpecsV2TestJSON, cls).resource_setup()
# Create admin qos client
# Create a test shared qos-specs for tests
cls.qos_name = utils.rand_name(cls.__name__ + '-QoS')
cls.qos_consumer = 'front-end'
cls.created_qos = cls.create_test_qos_specs(cls.qos_name,
cls.qos_consumer,
read_iops_sec='2000')
def _create_delete_test_qos_with_given_consumer(self, consumer):
name = utils.rand_name(self.__class__.__name__ + '-qos')
qos = {'name': name, 'consumer': consumer}
body = self.create_test_qos_specs(name, consumer)
for key in ['name', 'consumer']:
self.assertEqual(qos[key], body[key])
self.admin_volume_qos_client.delete_qos(body['id'])
self.admin_volume_qos_client.wait_for_resource_deletion(body['id'])
# validate the deletion
list_qos = self.admin_volume_qos_client.list_qos()['qos_specs']
self.assertNotIn(body, list_qos)
def _test_associate_qos(self, vol_type_id):
self.admin_volume_qos_client.associate_qos(
self.created_qos['id'], vol_type_id)
@test.idempotent_id('7e15f883-4bef-49a9-95eb-f94209a1ced1')
def test_create_delete_qos_with_front_end_consumer(self):
"""Tests the creation and deletion of QoS specs
With consumer as front end
"""
self._create_delete_test_qos_with_given_consumer('front-end')
@test.idempotent_id('b115cded-8f58-4ee4-aab5-9192cfada08f')
def test_create_delete_qos_with_back_end_consumer(self):
"""Tests the creation and deletion of QoS specs
With consumer as back-end
"""
self._create_delete_test_qos_with_given_consumer('back-end')
@test.idempotent_id('f88d65eb-ea0d-487d-af8d-71f4011575a4')
def test_create_delete_qos_with_both_consumer(self):
"""Tests the creation and deletion of QoS specs
With consumer as both front end and back end
"""
self._create_delete_test_qos_with_given_consumer('both')
@test.idempotent_id('7aa214cc-ac1a-4397-931f-3bb2e83bb0fd')
def test_get_qos(self):
"""Tests the detail of a given qos-specs"""
body = self.admin_volume_qos_client.show_qos(
self.created_qos['id'])['qos_specs']
self.assertEqual(self.qos_name, body['name'])
self.assertEqual(self.qos_consumer, body['consumer'])
@test.idempotent_id('75e04226-bcf7-4595-a34b-fdf0736f38fc')
def test_list_qos(self):
"""Tests the list of all qos-specs"""
body = self.admin_volume_qos_client.list_qos()['qos_specs']
self.assertIn(self.created_qos, body)
@test.idempotent_id('ed00fd85-4494-45f2-8ceb-9e2048919aed')
def test_set_unset_qos_key(self):
"""Test the addition of a specs key to qos-specs"""
args = {'iops_bytes': '500'}
body = self.admin_volume_qos_client.set_qos_key(
self.created_qos['id'],
iops_bytes='500')['qos_specs']
self.assertEqual(args, body)
body = self.admin_volume_qos_client.show_qos(
self.created_qos['id'])['qos_specs']
self.assertEqual(args['iops_bytes'], body['specs']['iops_bytes'])
# test the deletion of a specs key from qos-specs
keys = ['iops_bytes']
self.admin_volume_qos_client.unset_qos_key(self.created_qos['id'],
keys)
operation = 'qos-key-unset'
waiters.wait_for_qos_operations(self.admin_volume_qos_client,
self.created_qos['id'],
operation, keys)
body = self.admin_volume_qos_client.show_qos(
self.created_qos['id'])['qos_specs']
self.assertNotIn(keys[0], body['specs'])
@test.idempotent_id('1dd93c76-6420-485d-a771-874044c416ac')
def test_associate_disassociate_qos(self):
"""Test the following operations :
1. associate_qos
2. get_association_qos
3. disassociate_qos
4. disassociate_all_qos
"""
# create a test volume-type
vol_type = []
for _ in range(0, 3):
vol_type.append(self.create_volume_type())
# associate the qos-specs with volume-types
for i in range(0, 3):
self._test_associate_qos(vol_type[i]['id'])
# get the association of the qos-specs
body = self.admin_volume_qos_client.show_association_qos(
self.created_qos['id'])['qos_associations']
associations = [association['id'] for association in body]
for i in range(0, 3):
self.assertIn(vol_type[i]['id'], associations)
# disassociate a volume-type with qos-specs
self.admin_volume_qos_client.disassociate_qos(
self.created_qos['id'], vol_type[0]['id'])
operation = 'disassociate'
waiters.wait_for_qos_operations(self.admin_volume_qos_client,
|
self.created_qos['id'], operation,
vol_type[0]['id'])
# disassociate all volume-types from qos-specs
self.admin_volume_qos_client.disassociate_all_qos(
self.created_qos['id'])
ope
|
ration = 'disassociate-all'
waiters.wait_for_qos_operations(self.admin_volume_qos_client,
self.created_qos['id'], operation)
class QosSpecsV1TestJSON(QosSpecsV2TestJSON):
_api_version = 1
|
vickz84259/XKCD
|
scripts/argument.py
|
Python
|
mit
| 2,906
| 0
|
#!/usr/bin/env python
# Standard library modules
import ConfigParser
import argparse
import os
import textwrap
def create_config(configuration):
""" Function used to create the configuration file
if it does not exist in the program's path.
It returns a ConfigParser object
"""
configuration.add_section('Defaults')
configuration.set('Defaults', 'path', 'C:\\XKCD')
if not os.path.lexists('C:\\XKCD'):
os.mkdir('C:\\XKCD')
with open('xkcd.cfg', 'wb') as configfile:
configuration.write(configfile)
def get_args():
""" Function that parses the command line arguments and returns
them in an argparse.Namespace object
"""
config = ConfigParser.ConfigParser()
if not os.path.lexists('xkcd.cfg'):
create_config(config)
else:
config.read('xkcd.cfg')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Examples on how to use the program
----------------------------------
python xkcd.py --path C:\\Users\\admin\\Desktop\\xkcd --all
**all xkcd comics will be saved in path described
python xkcd.py -n 158
**downloads comic number 158
python xkcd.py --range 300 #
**downloads all comics from comic number 300
to the latest one. Inclusive of the latest one.
python xkcd.py --latest
**Downloads the latest comic.
'''))
parser.add_argument('-p', '--path', default=config.get('Defaults', 'path'),
help='The folder where the xkcd comics will be saved\
(default: %(default)s)')
# Only one of the arguments can be present in the command line.
group = parser.add_mutually_exclusive_group()
group.add_argument('-l', '--latest', action='store_true', help='\
Downloads the latest comic.')
group.add_argument('-a', '--all', action='store_true', help='\
Downloads all xkcd comics.')
group.add_argument('-n', dest='comic_number', default=argparse.SUPPRESS,
type=int, help='The comic number to be downloaded.')
group.add_argument('--range', dest='comic_range',
default=argparse.SUPPRESS, nargs=2,
help='Download the range of comics.\
e.g.
|
--range 30 100 # represents th
|
e latest comic.')
args = parser.parse_args()
if args.path != config.get('Defaults', 'path'):
if not os.path.lexists(args.path):
os.mkdir(args.path)
config.set('Defaults', 'path', args.path)
else:
config.set('Defaults', 'path', args.path)
with open('xkcd.cfg', 'wb') as configfile:
config.write(configfile)
return vars(args)
|
summermk/dragonfly
|
dragonfly/language/other/number_arabic.py
|
Python
|
lgpl-3.0
| 5,267
| 0.016999
|
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Arabic language implementations of Integer and Digits classes
============================================================================
"""
from ..base.integer_internal import (MapIntBuilder, CollectionIntBuilder,
MagnitudeIntBuilder, IntegerContentBase)
from ..base.digits_internal import DigitsContentBase
#---------------------------------------------------------------------------
int_0 = MapIntBuilder({
"صفر": 0,
})
int_1_9 = MapIntBu
|
ilder({
|
"واحد": 1,
"اثنان": 2,
"ثلاثة": 3,
"اربعة": 4,
"خمسة": 5,
"ستة": 6,
"سبعة": 7,
"ثمانية": 8,
"تسعة": 9,
})
int_10_19 = MapIntBuilder({
"عشرة": 10,
"احدى عشر": 11,
"اثنا عشر": 12,
"ثلاثة عشر": 13,
"اربعة عشر": 14,
"خمسة عشر": 15,
"ستة عشر": 16,
"سبعة عشر": 17,
"ثمانية عشر": 18,
"تسعة عشر": 19,
})
int_20_90_10 = MapIntBuilder({
"عشرون": 2,
"ثلاثون": 3,
"اربعون": 4,
"خمسون": 5,
"ستون": 6,
"سبعون": 7,
"ثمانون": 8,
"تسعون": 9,
})
int_20_99 = MagnitudeIntBuilder(
factor = 10,
spec = "<multiplier> [<remainder>]",
multipliers = [int_20_90_10],
remainders = [int_1_9],
)
int_and_1_99 = CollectionIntBuilder(
spec = "[و] <element>",
set = [int_1_9, int_10_19, int_20_99],
)
int_100s = MagnitudeIntBuilder(
factor = 100,
spec = "[<multiplier>] hundred [<remainder>]",
multipliers = [int_1_9],
remainders = [int_and_1_99],
)
int_100big = MagnitudeIntBuilder(
factor = 100,
spec = "[<multiplier>] hundred [<remainder>]",
multipliers = [int_10_19, int_20_99],
remainders = [int_and_1_99]
)
int_1000s = MagnitudeIntBuilder(
factor = 1000,
spec = "[<multiplier>] thousand [<remainder>]",
multipliers = [int_1_9, int_10_19, int_20_99, int_100s],
remainders = [int_and_1_99, int_100s]
)
int_1000000s = MagnitudeIntBuilder(
factor = 1000000,
spec = "[<multiplier>] million [<remainder>]",
multipliers = [int_1_9, int_10_19, int_20_99, int_100s, int_1000s],
remainders = [int_and_1_99, int_100s, int_1000s],
)
#---------------------------------------------------------------------------
class IntegerContent(IntegerContentBase):
builders = [int_0, int_1_9, int_10_19, int_20_99,
int_100s, int_100big, int_1000s, int_1000000s]
class DigitsContent(DigitsContentBase):
digits = [("صفر", "اووه"), "واحد", "اثنان", "ثلاثة", "اربعة",
"خمسة", "ستة", "سبعة", "ثمانية", "تسعة"]
|
CyberLabs-BR/face_detect
|
pyimagesearch/nn/mxconv/__init__.py
|
Python
|
mit
| 245
| 0.004082
|
# import the necessary packages
from .mxalexnet import MxAlexNet
from .mxvggnet import M
|
xVGGN
|
et
from .mxgooglenet import MxGoogLeNet
from .mxresnet import MxResNet
from .mxsqueezenet import MxSqueezeNet
from .mxagegendernet import MxAgeGenderNet
|
Ch00k/gun
|
setup.py
|
Python
|
mit
| 728
| 0.034341
|
#!/usr/bin/env python
from setuptools import setup
from gun import __version__
setup(
name = 'gun',
version = __version__,
description = 'Gentoo Updates Notifier',
author = 'Andriy Yurchuk',
author_email = 'ayurchuk@minuteware.net',
url = 'https://github.com/Ch00k/gun',
license = 'LICENSE.txt',
long_description = open('README.rst').read(),
entry_points = {
'console_scripts': [
'gun = gun.sync:main'
|
]
},
packages = ['gun'],
data_files = [('/etc/gun/', ['data/gun.conf'])],
install_requires =
|
['xmpppy >= 0.5.0-rc1']
)
|
gedakc/manuskript
|
manuskript/converters/__init__.py
|
Python
|
gpl-3.0
| 1,027
| 0.000974
|
#!/usr/bin/env python
# --!-- coding: utf8 --!--
"""
The converters package provides functions to quickly convert on the fly from
one format to another. It is responsible for checking which external libraries are
present, and for doing the job as well as possible with what we have in hand.
"""
from manuskript.converters.abstractConverter import abstractConverter
from manuskript.converters.pandocConverter import pandocConverter
#from manuskript.converters.markdownConverter import markdownConverter
def HTML2MD(html):
# Convert using pandoc
if pandocConverter.isValid():
return pandocConverter.convert(html, _from="html", to="markdown")
# Convert to plain text using QTextEdit
return HTML2PlainText(html)
def HTML2PlainText(html):
"""
Co
|
nvert from HT
|
ML to plain text.
"""
if pandocConverter.isValid():
return pandocConverter.convert(html, _from="html", to="plain")
# Last resort: probably resource inefficient
e = QTextEdit()
e.setHtml(html)
return e.toPlainText()
|
GcsSloop/PythonNote
|
PythonCode/Python进阶/模块/导入模块.py
|
Python
|
apache-2.0
| 2,071
| 0.005311
|
#coding=utf-8
#author: sloop
'''
Task
Python's os.path module provides the isdir() and isfile() functions. Import the module and call these functions to check whether the given directory and file exist.
Notes:
1. Since the code runs on the platform's server, what is being tested are folders and files on that server; the server has the folder /data/webroot/resource/python and the file /data/webroot/resource/python/test.txt, which you
|
can use for testing.
2. Of course, you can also check on your local machine whether the corresponding folders and files exist.
import os
print os.path.isdir(r'C:\Windows')
print os.path.isfile(r'C:\Windows\notepad.exe')
'''
import os
print os.path.isdir(r'C:\Windows')
print os.path.isfile(r'C:\Windows\notepad.exe')
'''
Note that the os.path module can be imported in several ways:
import os
import os.path
from os import path
from os.path import isdir, isfile
Each of these import styles calls isdir and isfile a little differently.
Reference
|
code:
import os
print os.path.isdir(r'/data/webroot/resource/python')
print os.path.isfile(r'/data/webroot/resource/python/test.txt')
'''
'''
Importing modules
To use a module, we must first import it. Python uses the import statement to import a module. For example, to import the built-in math module:
import math
You can think of math as a variable that points to the imported module; through that variable, we can access all of the public functions, variables and classes defined in the math module:
>>> math.pow(2, 0.5) # pow is a function
1.4142135623730951
>>> math.pi # pi is a variable
3.141592653589793
If we only want to import the few math functions we actually use, rather than all of them, we can use the following statement:
from math import pow, sin, log
This lets us refer to pow, sin and log directly, but the other math functions are not imported:
>>> pow(2, 10)
1024.0
>>> sin(3.14)
0.0015926529164868282
What if there is a name conflict? For example, the math module has a log function and the logging module also has a log function; if both are used at the same time, how do we resolve the conflict?
If we import the module name with import, functions must be referenced through the module name, so there is no conflict:
import math, logging
print math.log(10) # calls math's log function
logging.log(10, 'something') # calls logging's log function
If we use from...import to import the log function, a conflict is inevitable. In that case we can give the function an "alias" to avoid the conflict:
from math import log
from logging import log as logger # logging's log is now called logger
print log(10) # calls math's log
logger(10, 'import from logging') # calls logging's log
'''
|
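The tutorial above already walks through alias-based conflict resolution using Python 2 print statements; purely as a hedged illustration, a Python 3 version of the same idea might look like the sketch below (the logging.basicConfig call is an added assumption so the logging call produces visible output).

```python
# Python 3 sketch of the alias-based name-conflict resolution described above.
# logging.basicConfig is an added assumption so the low-level log call is emitted.
import logging
from math import log
from logging import log as logger  # logging's log is now referred to as logger

logging.basicConfig(level=logging.DEBUG)
print(log(10))                                # calls math's log
logger(logging.INFO, "import from logging")   # calls logging's log
```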
sklnet/opendroid-enigma2
|
lib/python/Plugins/SystemPlugins/DeviceManager/HddSetup.py
|
Python
|
gpl-2.0
| 8,530
| 0.030481
|
# for localized messages
from . import _
from enigma import *
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Sources.List import List
from Tools.Directories import resolveFilename, SCOPE_CURRENT_PLUGIN
from Tools.LoadPixmap import LoadPixmap
from Components.Button import Button
from Components.Label import Label
from Screens.MessageBox import MessageBox
from Screens.Standby import TryQuitMainloop
from HddPartitions import HddPartitions
from HddInfo import HddInfo
from Disks import Disks
from ExtraMessageBox import ExtraMessageBox
from ExtraActionBox import ExtraActionBox
from MountPoints import MountPoints
from boxbranding import getMachineBrand, getMachineName
import os
import sys
def DiskEntry(model, size, removable):
if removable:
pictur
|
e = LoadPixmap(cached = True, path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/DeviceManager/icons/diskusb.png"));
else:
picture = LoadPi
|
xmap(cached = True, path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/DeviceManager/icons/disk.png"));
return (picture, model, size)
class HddSetup(Screen):
skin = """
<screen name="HddSetup" position="center,center" size="560,430" title="Hard Drive Setup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget name="key_red" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget name="key_green" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="key_yellow" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="key_blue" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="menu" render="Listbox" position="20,45" size="520,380" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryPixmapAlphaTest(pos = (5, 0), size = (48, 48), png = 0),
MultiContentEntryText(pos = (65, 10), size = (330, 38), font=0, flags = RT_HALIGN_LEFT|RT_VALIGN_TOP, text = 1),
MultiContentEntryText(pos = (405, 10), size = (125, 38), font=0, flags = RT_HALIGN_LEFT|RT_VALIGN_TOP, text = 2),
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 50
}
</convert>
</widget>
</screen>"""
def __init__(self, session, args = 0):
self.session = session
Screen.__init__(self, session)
self.disks = list ()
self.mdisks = Disks()
for disk in self.mdisks.disks:
capacity = "%d MB" % (disk[1] / (1024 * 1024))
self.disks.append(DiskEntry(disk[3], capacity, disk[2]))
self["menu"] = List(self.disks)
self["key_red"] = Button(_("Mounts"))
self["key_green"] = Button(_("Info"))
self["key_yellow"] = Button(_("Initialize"))
self["key_blue"] = Button(_("Exit"))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"blue": self.quit,
"yellow": self.yellow,
"green": self.green,
"red": self.red,
"cancel": self.quit,
}, -2)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("Devicelist"))
def isExt4Supported(self):
return "ext4" in open("/proc/filesystems").read()
def mkfs(self):
self.formatted += 1
return self.mdisks.mkfs(self.mdisks.disks[self.sindex][0], self.formatted, self.fsresult)
def refresh(self):
self.disks = list ()
self.mdisks = Disks()
for disk in self.mdisks.disks:
capacity = "%d MB" % (disk[1] / (1024 * 1024))
self.disks.append(DiskEntry(disk[3], capacity, disk[2]))
self["menu"].setList(self.disks)
def checkDefault(self):
mp = MountPoints()
mp.read()
if not mp.exist("/media/hdd"):
mp.add(self.mdisks.disks[self.sindex][0], 1, "/media/hdd")
mp.write()
mp.mount(self.mdisks.disks[self.sindex][0], 1, "/media/hdd")
os.system("/bin/mkdir -p /media/hdd/movie")
message = _("Fixed mounted first initialized Storage Device to /media/hdd. It needs a system restart in order to take effect.\nRestart your %s %s now?") % (getMachineBrand(), getMachineName())
mbox = self.session.openWithCallback(self.restartBox, MessageBox, message, MessageBox.TYPE_YESNO)
mbox.setTitle(_("Restart %s %s") % (getMachineBrand(), getMachineName()))
def restartBox(self, answer):
if answer is True:
self.session.open(TryQuitMainloop, 2)
def format(self, result):
if result != 0:
self.session.open(MessageBox, _("Cannot format partition %d") % (self.formatted), MessageBox.TYPE_ERROR)
if self.result == 0:
if self.formatted > 0:
self.checkDefault()
self.refresh()
return
elif self.result > 0 and self.result < 3:
if self.formatted > 1:
self.checkDefault()
self.refresh()
return
elif self.result == 3:
if self.formatted > 2:
self.checkDefault()
self.refresh()
return
elif self.result == 4:
if self.formatted > 3:
self.checkDefault()
self.refresh()
return
self.session.openWithCallback(self.format, ExtraActionBox, _("Formatting partition %d") % (self.formatted + 1), _("Initialize disk"), self.mkfs)
def fdiskEnded(self, result):
if result == 0:
self.format(0)
elif result == -1:
self.session.open(MessageBox, _("Cannot umount current device.\nA record in progress, timeshift or some external tools (like samba, swapfile and nfsd) may cause this problem.\nPlease stop this actions/applications and try again"), MessageBox.TYPE_ERROR)
else:
self.session.open(MessageBox, _("Partitioning failed!"), MessageBox.TYPE_ERROR)
def fdisk(self):
return self.mdisks.fdisk(self.mdisks.disks[self.sindex][0], self.mdisks.disks[self.sindex][1], self.result, self.fsresult)
def initialaze(self, result):
if not self.isExt4Supported():
result += 1
if result != 4:
self.fsresult = result
self.formatted = 0
mp = MountPoints()
mp.read()
mp.deleteDisk(self.mdisks.disks[self.sindex][0])
mp.write()
self.session.openWithCallback(self.fdiskEnded, ExtraActionBox, _("Partitioning..."), _("Initialize disk"), self.fdisk)
def chooseFSType(self, result):
if result != 5:
self.result = result
if self.isExt4Supported():
self.session.openWithCallback(self.initialaze, ExtraMessageBox, _("Format as"), _("Partitioner"),
[ [ "Ext4", "partitionmanager.png" ],
[ "Ext3", "partitionmanager.png" ],
[ "NTFS", "partitionmanager.png" ],
[ "Fat32", "partitionmanager.png" ],
[ _("Cancel"), "cancel.png" ],
], 1, 4)
else:
self.session.openWithCallback(self.initialaze, ExtraMessageBox, _("Format as"), _("Partitioner"),
[ [ "Ext3", "partitionmanager.png" ],
[ "NTFS", "partitionmanager.png" ],
[ "Fat32", "partitionmanager.png" ],
[ _("Cancel"), "cancel.png" ],
], 1, 3)
def yellow(self):
if len(self.mdisks.disks) > 0:
self.sindex = self['menu'].getIndex()
self.session.openWithCallback(self.chooseFSType, ExtraMessageBox, _("Please select your preferred configuration."), _("Partitioner"),
[ [ _("One partition"), "partitionmanager.png" ],
[ _("Two partitions (50% - 50%)"), "partitionmanager.png" ],
[ _("Two partitions (75% - 25%)"), "partitionmanager.png" ],
[ _("Three partitions (33% - 33% - 33%)"), "partitionmanager.png" ],
[ _("Four partitions (25% - 25% - 25% - 25%)"), "partitionmanager.png" ],
[ _("Cancel"), "cancel.png" ],
], 1, 5)
def green(self):
if len(self.mdisks.disks) > 0:
self.sindex = self['menu'].getIndex()
self.session.open(HddInfo, self.mdisks.disks[self.sindex][0])
def red(self)
|
Baldanos/hydrafw
|
src/build-scripts/dfu-convert.py
|
Python
|
apache-2.0
| 5,855
| 0.029889
|
#!/usr/bin/python2
# Written by Antonio Galea - 2010/11/18
# Distributed under Gnu LGPL 3.0
# see http://www.gnu.org/licenses/lgpl-3.0.txt
import sys,struct,zlib,os
from optparse import OptionParser
from intelhex import IntelHex
DEFAULT_DEVICE="0x0483:0xdf11"
DEFAULT_REVISION="0.0"
def named(tuple,names):
return dict(list(zip(names.split(),tuple)))
def consume(fmt,data,names):
n = struct.calcsize(fmt)
return named(struct.unpack(fmt,data[:n]),names),data[n:]
def cstring(string):
return string.split(b'\0',1)[0]
def compute_crc(data):
return 0xFFFFFFFF & -zlib.crc32(data) -1
def rev2byte(revision):
ma,mi = revision.split('.')
return (int(ma,16)<<8 | int(mi,16))
def parse(file,dump_images=False):
print('File: "%s"' % file)
data = open(file,'rb').read()
crc = compute_crc(data[:-4])
prefix, data = consume('<5sBIB',data,'signature version size targets')
print('%(signature)s v%(version)d, image size: %(size)d, targets: %(targets)d' % prefix)
for t in range(prefix['targets']):
tprefix, data = consume('<6sBI255s2I',data,'signature altsetting named name size elements')
tprefix['num'] = t
if tprefix['named']:
tprefix['name'] = cstring(tprefix['name'])
else:
tprefix['name'] = ''
print('%(signature)s %(num)d, alt setting: %(altsetting)s, name: "%(name)s", size: %(size)d, elements: %(elements)d' % tprefix)
tsize = tprefix['size']
target, data = data[:tsize], data[tsize:]
for e in range(tprefix['elements']):
eprefix, target = consume('<2I',target,'address size')
eprefix['num'] = e
print(' %(num)d, address: 0x%(address)08x, size: %(size)d' % eprefix)
esize = eprefix['size']
image, target = target[:esize], target[esize:]
if dump_images:
out = '%s.target%d.image%d.bin' % (file,t,e)
open(out,'wb').write(image)
print(' DUMPED IMAGE TO "%s"' % out)
if len(target):
print("target %d: PARSE ERROR" % t)
suffix = named(struct.unpack('<4H3sBI',data[:16]),'device product vendor dfu ufd len crc')
print('usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x' % suffix)
if crc != suffix['crc']:
print("CRC ERROR: computed crc32 is 0x%08x" % crc)
data = data[16:]
if data:
print("PARSE ERROR")
def build(file,targets,device=DEFAULT_DEVICE, revision="0.0"):
data = b''
for t,target in enumerate(targets):
tdata = b''
for image in target:
tdata += struct.pack('<2I',image['address'],len(image['data']))+image['data']
tdata = struct.pack('<6sBI255s2I',b'Target',0,1,b'ST...',len(tdata),len(target)) + tdata
data += tdata
data = struct.pack('<5sBIB',b'DfuSe',1,len(data)+11,len(targets)) + data
v,d=[int(x,0) & 0xFFFF for x in device.split(':',1)]
rev = rev2byte(revision)
data += struct.pack('<4H3sB',rev,d,v,0x011a,b'UFD',16)
crc = compute_crc(data)
data += struct.pack('<I',crc)
open(file,'wb').write(data)
if __name__=="__main__":
usage = """
%prog [-d|--dump] infile.dfu
%prog {-b|--build} address:file.bin [-b address:file.bin ...] [{-D|--device}=vendor:device] outfile.dfu
%prog {-i|--ihex} file.hex [-i file.hex ...] [{-D|--device}=vendor:device] outfile.dfu"""
parser = OptionParser(usage=usage)
parser.add_option("-b", "--build", action="append", dest="binfiles",
help="build a DFU file from given BINFILES", metavar="BINFILES")
parser.add_option("-i", "--ihex", action="append", dest="hexfiles",
help="build a DFU file from given HEXFILES", metavar="HEXFILES")
parser.add_option("-D", "--device", action="store", dest="device",
help="build for DEVICE, defaults to %s" % DEFAULT_DEVICE, metavar="DEVICE")
parser.add_option("-r", "--revision", action="store", dest="revision",
help="Revision number, defaults to %s" % DEFAULT_REVISION, metavar="REVISION")
parser.add_option("-d", "--dump", action="store_true", dest="dump_images",
default=False, help="dump contained images to current directory")
(options, args) = parser.parse_args()
if (options.binfiles or options.hexfiles) and len(args)==1:
target = []
if options.binfiles:
for arg in options.binfiles:
try:
address,binfile = arg.split(':',1)
except ValueError:
print("Address:file couple '%s' invalid." % arg)
sys.exit(1)
try:
address = int(address,0) & 0xFFFFFFFF
except ValueError:
print("Address %s invalid." % address)
sys.exit(1)
if not os.path.isfile(binfile):
print("Unreadable file '%s'." % binfile)
sys.exit(1)
target.append({ 'address': address, 'data': open(binfile,'rb').read() })
if options.hexfiles:
for hex in options.hexfiles:
ih = IntelHex(hex)
address = ih.minaddr()
data = ih.tobinstr()
try:
address = address & 0xFFFFFFFF
except ValueError:
print("Address %s invalid." % address)
sys.exit(1)
target.append({ 'address': a
|
ddress, 'data': data })
revision = DEFAULT_REVISION
if options.revision:
try:
rev2byte(options.revision)
revision = options.revision
except ValueError:
print("Invalid revision value.")
sys.exit(1)
outfile = args[0]
device = DEFAULT_DEVICE
if options.device:
device=options.device
try:
v,d=[int(x,0) & 0xFFFF for x in d
|
evice.split(':',1)]
except:
print("Invalid device '%s'." % device)
sys.exit(1)
build(outfile,[target],device, revision)
elif len(args)==1:
infile = args[0]
if not os.path.isfile(infile):
print("Unreadable file '%s'." % infile)
sys.exit(1)
parse(infile, dump_images=options.dump_images)
else:
parser.print_help()
sys.exit(1)
|
trondhindenes/ansible
|
test/runner/lib/cloud/vcenter.py
|
Python
|
gpl-3.0
| 5,079
| 0.002363
|
"""VMware vCenter plugin for integration tests."""
from __future__ import absolute_import, print_function
import os
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.util import (
find_executable,
display,
)
from lib.docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
get_docker_container_id,
)
class VcenterProvider(CloudProvider):
"""VMware vcenter/esx plugin. Sets up cloud resources for tests."""
DOCKER_SIMULATOR_NAME = 'vcenter-simulator'
def __init__(self, args):
"""
:type args: TestConfig
"""
super(VcenterProvider, self).__init__(args, config_extension='.ini')
# The simulator must be pinned to a specific version to guarantee CI passes with the version used.
if os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
self.image = os.environ.get('ANSIBLE_VCSIM_CONTAINER')
else:
self.image = 'quay.io/ansible/vcenter-test-container:1.3.0'
self.container_name = ''
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
docker = find_executable('docker', required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(VcenterProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_docker_run_options(self):
"""Get any additional options needed when delegating tests to a docker container.
:rtype: list[str]
"""
if self.managed:
return ['--link', self.DOCKER_SIMULATOR_NAME]
return []
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration fil
|
es after tests complete."""
if self.container_name:
docker_rm(self.args, self.container_name)
super(Vce
|
nterProvider, self).cleanup()
def _setup_dynamic(self):
"""Create a vcenter simulator using docker."""
container_id = get_docker_container_id()
if container_id:
display.info('Running in docker container: %s' % container_id, verbosity=1)
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0].get('State', {}).get('Running'):
docker_rm(self.args, self.container_name)
results = []
if results:
display.info('Using the existing vCenter simulator docker container.', verbosity=1)
else:
display.info('Starting a new vCenter simulator docker container.', verbosity=1)
if not self.args.docker and not container_id:
# publish the simulator ports when not running inside docker
publish_ports = [
'-p', '80:80',
'-p', '443:443',
'-p', '8080:8080',
'-p', '8989:8989',
'-p', '5000:5000', # control port for flask app in simulator
]
else:
publish_ports = []
if not os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
docker_pull(self.args, self.image)
docker_run(
self.args,
self.image,
['-d', '--name', self.container_name] + publish_ports,
)
if self.args.docker:
vcenter_host = self.DOCKER_SIMULATOR_NAME
elif container_id:
vcenter_host = self._get_simulator_address()
display.info('Found vCenter simulator container address: %s' % vcenter_host, verbosity=1)
else:
vcenter_host = 'localhost'
self._set_cloud_config('vcenter_host', vcenter_host)
def _get_simulator_address(self):
results = docker_inspect(self.args, self.container_name)
ipaddress = results[0]['NetworkSettings']['IPAddress']
return ipaddress
def _setup_static(self):
raise NotImplementedError()
class VcenterEnvironment(CloudEnvironment):
"""VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
def configure_environment(self, env, cmd):
"""
:type env: dict[str, str]
:type cmd: list[str]
"""
# Send the container IP down to the integration test(s)
env['vcenter_host'] = self._get_cloud_config('vcenter_host')
|
fernandog/Medusa
|
medusa/providers/torrent/html/speedcd.py
|
Python
|
gpl-3.0
| 7,255
| 0.002205
|
# coding=utf-8
"""Provider code for Speed.cd."""
from __future__ import unicode_literals
import logging
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class SpeedCDProvider(TorrentProvider):
"""SpeedCD Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(SpeedCDProvider, self).__init__('Speedcd')
# Credentials
self.username = None
self.password = None
# URLs
self.url = 'https://speed.cd'
self.urls = {
'login': urljoin(self.url, 'login.php'),
'search': urljoin(self.url, 'browse.php'),
'login_post': None,
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK', 'REAL']
# Miscellaneous Options
self.freeleech = False
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv.Cache(self)
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
if not self.login():
return results
# http://speed.cd/browse.php?c49=1&c50=1&c52=1&c41=1&c55=1&c2=1&c30=1&freeleech=on&search=arrow&d=on
# Search Params
search_params = {
'c2': 1, # TV/Episodes
'c30': 1, # Anime
'c41': 1, # TV/Packs
'c49': 1, # TV/HD
'c50': 1, # TV/Sports
'c52': 1, # TV/B-Ray
'c55': 1, # TV/Kids
'search': '',
'freeleech': 'on' if self.freeleech else None
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
for search_string in search_strings[mode]:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_string})
search_params['search'] = search_string
response = self.session.get(self.urls['search'], params=search_params)
if not response or not response.text:
log.debug('No data returned from provider')
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
# Units
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('div', class_='boxContent')
torrent_table = torrent_table.find('table') if torrent_table else None
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
log.debug('Data returned from provider does not contain any torrents')
return items
# Skip column headers
for row in torrent_rows[1:]:
cells = row('td')
try:
title = cells[1].find('a', class_='torrent').get_text() if cells[1].find('a', class_='torrent') else None
download_url = urljoin(self.url,
cells[2].find(title='Download').parent['href'])
if not all([title, download_url]):
continue
seeders = try_int(cells[5].get_text(strip=True))
leechers = try_int(cells[6].get_text(strip=True))
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
" minimum seeders: {0}. Seeders: {1}",
title, seeders)
continue
torrent_size = cells[4].get_text()
torrent_size = torrent_size[:-2] + ' ' + torrent_size[-2:]
size = convert_size(torrent_size, units=units) or -1
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': None,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
def login(self):
"""Login method used for logging in before doing search and torrent downloads."""
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
}
if not self.urls['login_post'] and not self.login_url
|
():
log.debug('Unable to get login URL')
return False
response = self.session.post(self.urls['login_post'], data=login_params)
if not response or not response.text:
log.debug('Unable to connect to provider using login URL: {url}',
{'url': self.urls['login_post']})
return False
if 'incorrect username or
|
password. please try again' in response.text.lower():
log.warning('Invalid username or password. Check your settings')
return False
return True
def login_url(self):
"""Get the login url (post) as speed.cd keeps changing it."""
response = self.session.get(self.urls['login'])
if not response or not response.text:
log.debug('Unable to connect to provider to get login URL')
return
data = BS4Parser(response.text, 'html5lib')
login_post = data.soup.find('form', id='loginform').get('action')
if login_post:
self.urls['login_post'] = urljoin(self.url, login_post)
return True
provider = SpeedCDProvider()
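if __name__ == '__main__':
    # Hedged usage sketch, not part of the original provider module: the
    # search_strings argument maps a mode name to a list of query strings,
    # as described in the `search` docstring above. The mode name and query
    # below are assumptions, and a real run needs network access plus valid
    # site credentials configured on the provider.
    example_results = provider.search({'Episode': ['Show Name S01E01']})
    print('Found {0} results'.format(len(example_results)))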
|
mmirecki/ovirt-provider-mock
|
vifdriver/vif_driver.py
|
Python
|
gpl-2.0
| 1,986
| 0
|
from abc import abstractmethod
class VIFDriver(object):
@abstractmethod
def after_device_destroy(self, environ, domxml):
return domxml
@abstractmethod
def after_device_create(self, environ, domxml):
return domxml
@abstractmethod
def after_network_setup(self, environ, json_content):
return json_content
@abstractmethod
def after_nic_hotplug(self, environ, domxml):
return domxml
@abstractmethod
def after_nic_unplug(self, environ, domxml):
return domxml
@abstractmethod
def after_get_caps(self, environ, json_content):
return json_content
@abstractmethod
def after_get_stats(self, environ, json_content):
return json_content
@abstractmethod
def after_vm_start(self, environ, domxml):
return domxml
def after_migration_source(self, environ, domxml):
return domxml
def after_migration_destination(self, environ, domxml):
return domxml
@abstractmethod
def before_get_caps(self, environ, json_content):
return json_content
@abstractmethod
def before_get_stats(self, environ, json_content):
return json_content
@abstractmethod
def before_nic_hotplug(self, environ, domxml):
return domxml
@abstractmethod
def before_nic_unplug(self, environ, domxml):
return domxml
@abstractmethod
def before_device_create(self, environ, domxml):
return domxml
@abstractmethod
def before_device_destroy(self, environ, domxml):
return domxml
@abstractmethod
    def before_migration_source(self, environ, domxml):
return domxml
@abstractmethod
    def before_migration_destination(self, environ, domxml):
return domxml
@abstractmethod
def before_network_setup(self, environ, json_content):
return json_content
@abstractmethod
def before_vm_start(self, environ, domxml):
return domxml
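# Hedged illustration, not part of the original module: a minimal concrete
# driver that overrides a single hook and passes the domain XML through
# unchanged. The class name and behaviour are assumptions for illustration.
class PassthroughVifDriver(VIFDriver):
    def before_nic_hotplug(self, environ, domxml):
        # A real driver could inspect or rewrite the interface XML here.
        return domxml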
|
saukrIppl/seahub
|
thirdpart/djangorestframework-3.3.2-py2.7.egg/rest_framework/relations.py
|
Python
|
apache-2.0
| 18,087
| 0.000829
|
# coding: utf-8
from __future__ import unicode_literals
from collections import OrderedDict
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.core.urlresolvers import (
NoReverseMatch, Resolver404, get_script_prefix, resolve
)
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.translation import ugettext_lazy as _
from rest_framework.fields import (
Field, empty, get_attribute, is_simple_callable, iter_options
)
from rest_framework.reverse import reverse
from rest_framework.utils import html
class Hyperlink(six.text_type):
"""
A string like object that additionally has an associated name.
We use this for hyperlinked URLs that may render as a named link
in some contexts, or render as a plain URL in others.
"""
def __new__(self, url, name):
ret = six.text_type.__new__(self, url)
ret.name = name
return ret
def __getnewargs__(self):
return(str(self), self.name,)
is_hyperlink = True
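# Hedged illustration, not part of the original module: a Hyperlink behaves
# like its URL string while also carrying a display name. The sample URL and
# name are assumptions.
#   link = Hyperlink('http://example.com/users/1/', 'user-detail')
#   str(link) == 'http://example.com/users/1/' and link.name == 'user-detail'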
class PKOnlyObject(object):
"""
This is a mock object, used for when we only need the pk of the object
instance, but still want to return an object with a .pk attribute,
in order to keep the same interface as a regular model instance.
"""
def __init__(self, pk):
self.pk = pk
# We assume that 'validators' are intended for the child serializer,
# rather than the parent serializer.
MANY_RELATION_KWARGS = (
'read_only', 'write_only', 'required', 'default', 'initial', 'source',
'label', 'help_text', 'style', 'error_messages', 'allow_empty'
)
class RelatedField(Field):
queryset = None
html_cutoff = 1000
html_cutoff_text = _('More than {count} items...')
def __init__(self, **kwargs):
self.queryset = kwargs.pop('queryset', self.queryset)
self.html_cutoff = kwargs.pop('html_cutoff', self.html_cutoff)
        self.html_cutoff_text = kwargs.pop('html_cutoff_text', self.html_cutoff_text)
assert self.queryset is not None or kwargs.get('read_only', None), (
            'Relational field must provide a `queryset` argument, '
'or set read_only=`True`.'
)
assert not (self.queryset is not None and kwargs.get('read_only', None)), (
'Relational fields should not provide a `queryset` argument, '
'when setting read_only=`True`.'
)
kwargs.pop('many', None)
kwargs.pop('allow_empty', None)
super(RelatedField, self).__init__(**kwargs)
def __new__(cls, *args, **kwargs):
# We override this method in order to automagically create
# `ManyRelatedField` classes instead when `many=True` is set.
if kwargs.pop('many', False):
return cls.many_init(*args, **kwargs)
return super(RelatedField, cls).__new__(cls, *args, **kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
"""
This method handles creating a parent `ManyRelatedField` instance
when the `many=True` keyword argument is passed.
Typically you won't need to override this method.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomManyRelatedField(*args, **kwargs)
"""
list_kwargs = {'child_relation': cls(*args, **kwargs)}
for key in kwargs.keys():
if key in MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return ManyRelatedField(**list_kwargs)
def run_validation(self, data=empty):
# We force empty strings to None values for relational fields.
if data == '':
data = None
return super(RelatedField, self).run_validation(data)
def get_queryset(self):
queryset = self.queryset
if isinstance(queryset, (QuerySet, Manager)):
# Ensure queryset is re-evaluated whenever used.
# Note that actually a `Manager` class may also be used as the
# queryset argument. This occurs on ModelSerializer fields,
# as it allows us to generate a more expressive 'repr' output
# for the field.
# Eg: 'MyRelationship(queryset=ExampleModel.objects.all())'
queryset = queryset.all()
return queryset
def use_pk_only_optimization(self):
return False
def get_attribute(self, instance):
if self.use_pk_only_optimization() and self.source_attrs:
# Optimized case, return a mock object only containing the pk attribute.
try:
instance = get_attribute(instance, self.source_attrs[:-1])
value = instance.serializable_value(self.source_attrs[-1])
if is_simple_callable(value):
# Handle edge case where the relationship `source` argument
# points to a `get_relationship()` method on the model
value = value().pk
return PKOnlyObject(pk=value)
except AttributeError:
pass
# Standard case, return the object instance.
return get_attribute(instance, self.source_attrs)
@property
def choices(self):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
return OrderedDict([
(
six.text_type(self.to_representation(item)),
self.display_value(item)
)
for item in queryset
])
@property
def grouped_choices(self):
return self.choices
def iter_options(self):
return iter_options(
self.grouped_choices,
cutoff=self.html_cutoff,
cutoff_text=self.html_cutoff_text
)
def display_value(self, instance):
return six.text_type(instance)
class StringRelatedField(RelatedField):
"""
A read only field that represents its targets using their
plain string representation.
"""
def __init__(self, **kwargs):
kwargs['read_only'] = True
super(StringRelatedField, self).__init__(**kwargs)
def to_representation(self, value):
return six.text_type(value)
class PrimaryKeyRelatedField(RelatedField):
default_error_messages = {
'required': _('This field is required.'),
'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),
}
def __init__(self, **kwargs):
self.pk_field = kwargs.pop('pk_field', None)
super(PrimaryKeyRelatedField, self).__init__(**kwargs)
def use_pk_only_optimization(self):
return True
def to_internal_value(self, data):
if self.pk_field is not None:
data = self.pk_field.to_internal_value(data)
try:
return self.get_queryset().get(pk=data)
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=data)
except (TypeError, ValueError):
self.fail('incorrect_type', data_type=type(data).__name__)
def to_representation(self, value):
if self.pk_field is not None:
return self.pk_field.to_representation(value.pk)
return value.pk
class HyperlinkedRelatedField(RelatedField):
lookup_field = 'pk'
view_name = None
default_error_messages = {
'required': _('This field is required.'),
'no_match': _('Invalid hyperlink - No URL match.'),
'incorrect_match': _('Invalid hyperlink - Incorrect URL match.'),
'does_not_exist': _('Invalid hyperlin
|
qistoph/thug
|
src/DOM/W3C/HTML/HTMLImageElement.py
|
Python
|
gpl-2.0
| 984
| 0.014228
|
#!/usr/bin/env python
from .HTMLElement import HTMLElement
from .attr_property import attr_property
from .compatibility import thug_long
class HTMLImageElement(HTMLElement):
def __init__(self, doc, tag):
        HTMLElement.__init__(self, doc, tag)
align = attr_property("align")
    alt = attr_property("alt")
border = attr_property("border")
height = attr_property("height", thug_long)
hspace = attr_property("hspace", thug_long)
isMap = attr_property("ismap", bool)
longDesc = attr_property("longdesc")
# Removed in DOM Level 2
#lowSrc = attr_property("lowsrc")
name = attr_property("name")
src = attr_property("src")
useMap = attr_property("usemap")
vspace = attr_property("vspace", thug_long)
width = attr_property("width", thug_long)
@property
def complete(self):
return True
|
cosynus/python
|
acedefconfig.py
|
Python
|
mit
| 1,800
| 0.001111
|
'''
AceProxy default configuration script
DO NOT EDIT THIS FILE!
Copy this file to aceconfig.py and change only needed options.
'''
import logging
import platform
from aceclient.acemessages import AceConst
class AceDefConfig(object):
acespawn = False
acecmd = "acestreamengine --client-console"
acekey = 'n51LvQoTlJzNGaFxseRK-uvnvX-sD4Vm5Axwmc4UcoD-jruxmKsuJaH0eVgE'
acehost = '127.0.0.1'
aceport = 62062
aceage = AceConst.AGE_18_24
acesex = AceConst.SEX_MALE
acestartuptimeout = 10
aceconntimeout = 5
aceresulttimeout = 10
debug = logging.DEBUG
#
httphost = '0.0.0.0'
httpport = 8000
aceproxyuser = ''
firewall = False
firewallblacklistmode = False
firewallnetranges = (
'127.0.0.1',
'192.168.0.0/16',
)
maxconns = 10
loggingtoafile = False
logpath = ''
vlcuse = False
vlcuseaceplayer = False
vlcspawn = False
vlccmd = "vlc -I telnet --clock-jitter -1 --network-caching -1 --sout-mux-caching 2000 --telnet-password admin --telnet-port 4212"
vlcspawntimeout = 5
vlchost = '127.0.0.1'
vlcport = 4212
vlcoutport = 8081
vlcpass = 'admin'
vlcpreaccess = ''
vlcmux = 'ts'
vlcforceffmpeg = False
videodelay = 2
videoobey = True
videopausedelay = 2
videoseekback = 0
videodestroydelay = 3
videotimeout = 40
fakeuas = ('Mozilla/5.0 IMC plugin Macintosh', )
fakeheaderuas = ('HLS Client/2.0 (compatible; LG NetCast.TV-2012)',
'Mozilla/5.0 (DirectFB; Linux armv7l) AppleWebKit/534.26+ (KHTML, like Gecko) Version/5.0 Safari/534.26+ LG Browser/5.00.00(+mouse+3D+SCREEN+TUNER; LGE; 42LM670T-ZA; 04.41.03; 0x00000001;); LG NetCast.TV-2012 0'
)
osplatform = platform.system()
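# Hedged sketch, not part of the original file: a local aceconfig.py would
# typically subclass the defaults above and override only the needed options.
# The class name and the values below are assumptions for illustration.
class AceConfigExample(AceDefConfig):
    httphost = '127.0.0.1'
    httpport = 8080
    maxconns = 25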
|
fmfn/UnbalancedDataset
|
examples/over-sampling/plot_shrinkage_effect.py
|
Python
|
mit
| 3,955
| 0.002528
|
"""
======================================================
Effect of the shrinkage factor in random over-sampling
======================================================
This example shows the effect of the shrinkage factor used to generate the
smoothed bootstrap using the
:class:`~imblearn.over_sampling.RandomOverSampler`.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import seaborn as sns
sns.set_context("poster")
# %%
# First, we will generate a toy classification dataset with only few samples.
# The ratio between the classes will be imbalanced.
from collections import Counter
from sklearn.datasets import make_classification
X, y = make_classification(
n_samples=100,
n_features=2,
n_redundant=0,
weights=[0.1, 0.9],
random_state=0,
)
Counter(y)
# %%
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# Now, we will use a :class:`~imblearn.over_sampling.RandomOverSampler` to
# generate a bootstrap for the minority class with as many samples as in the
# majority class.
from imblearn.over_sampling import RandomOverSampler
sampler = RandomOverSampler(random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
Counter(y_res)
# %%
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# We observe that the minority samples are less transparent than the samples
# from the majority class. Indeed, it is due to the fact that these samples
# of the minority class are repeated during the bootstrap generation.
#
# We can set `shrinkage` to a floating value to add a small perturbation to the
# samples created and therefore create a smoothed bootstrap.
sampler = RandomOverSampler(shrinkage=1, random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
Counter(y_res)
# %%
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# In this case, we see that the samples in the minority class are not
# overlapping anymore due to the added noise.
#
# The parameter `shrinkage` allows adding more or less perturbation. Let's
# add more perturbation when generating the smoothed bootstrap.
sampler = RandomOverSampler(shrinkage=3, random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
Counter(y_res)
# %%
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# Increasing the value of `shrinkage` will disperse the new samples. Forcing
# the shrinkage to 0 will be equivalent to generating a normal bootstrap.
sampler = RandomOverSampler(shrinkage=0, random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
Counter(y_res)
# %%
fig, ax = plt.subplots(figsize=(7, 7))
scatter = plt.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.4)
class_legend = ax.legend(*scatter.legend_elements(), loc="lower left", title="Classes")
ax.add_artist(class_legend)
ax.set_xlabel("Feature #1")
_ = ax.set_ylabel("Feature #2")
plt.tight_layout()
# %%
# Therefore, the `shrinkage` is handy to manually tune the dispersion of the
# new samples.
|
pythonchelle/opencomparison
|
apps/searchv1/urls.py
|
Python
|
mit
| 1,067
| 0.049672
|
from django.conf.urls.defaults import *
from searchv1.views import (
search, find_grids_autocomplete, find_packages_autocomplete, search_by_function_autocomplete,
search_by_category_autocomplete)
urlpatterns = patterns("",
url(
regex = '^$',
view = search,
name = 'search',
),
url(
regex = '^grids/autocomplete/$',
view = search_by_function_autocomplete,
name = 'search_grids_autocomplete',
kwargs = dict(
search_function=find_grids_autocomplete,
)
),
url(
regex = '^packages/autocomplete/$',
view = search_by_function_autocomplete,
        name = 'search_packages_autocomplete',
kwargs = dict(
search_function=find_packages_autocomplete,
)
),
url(
regex = '^packages/by-category/autocomplete/$',
view = search_by_category_autocomplete,
name = 'search_by_category_autocomplete',
),
)
|
gkc1000/pyscf
|
examples/local_orb/pmloc.py
|
Python
|
apache-2.0
| 8,521
| 0.064312
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Localization function: pmloc.loc(mol,mocoeff)
#
# -ZL@20151227- Add Boys. Note that to view LMOs
# in jmol, 'orbital energies' need
# to be defined and used to reorder
# LMOs.
# -ZL@20151225- Pipek-Mezey localization
#
import math
import numpy
import scipy.linalg
from pyscf.tools import molden
#------------------------------------------------
# Initialization
#------------------------------------------------
def sqrtm(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v*numpy.sqrt(e), v.T.conj())
def lowdin(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v/numpy.sqrt(e), v.T.conj())
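def _lowdin_selfcheck():
   # Hedged numeric check, not part of the original module: for a symmetric
   # positive-definite overlap matrix S, lowdin(S) returns S**(-1/2), so
   # lowdin(S).S.lowdin(S) should recover the identity matrix.
   s = numpy.array([[1.0, 0.2], [0.2, 1.0]])
   x = lowdin(s)
   return numpy.allclose(x.dot(s).dot(x), numpy.identity(2))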
#def scdmU(coeff,ova):
# aux = numpy.identity(ova.shape[0])
# #aux = lowdin(ova)
# no = coeff.shape[1]
# ova = reduce(numpy.dot,(coeff.T,ova,aux))
# # ova = no*nb
# q,r,piv = scipy.linalg.qr(ova, pivoting=True)
# # In fact, it is just "Lowdin-orthonormalized PAO".
# bc = ova[:,piv[:no]]
# ova = numpy.dot(bc.T,bc)
# s12inv = lowdin(ova)
# u = numpy.dot(bc,s12inv)
# return u
#------------------------------------------------
# Boys/PM-Localization
#------------------------------------------------
def loc(mol,mocoeff,tol=1.e-6,maxcycle=1000,iop=0):
partition = genAtomPartition(mol)
ova = mol.intor_symmetric("cint1e_ovlp_sph")
ierr,u = loc_kernel(mol,mocoeff,ova,partition,tol,maxcycle,iop)
#ierr = 0
#u = scdmU(mocoeff,ova)
#if iop <= 2:
# mocoeff2 = mocoeff.dot(u)
# ierr,u2 = loc_kernel(mol,mocoeff2,ova,partition,tol,maxcycle,iop)
# u = u.dot(u2)
return ierr,u
def loc_kernel(mol,mocoeff,ova,partition,tol,maxcycle,iop):
debug = False
print
print '[pm_loc_kernel]'
print ' mocoeff.shape=',mocoeff.shape
print ' tol=',tol
print ' maxcycle=',maxcycle
print ' partition=',len(partition),'\n',partition
k = mocoeff.shape[0]
n = mocoeff.shape[1]
natom = len(partition)
def genPaij(mol,mocoeff,ova,partition,iop):
c = mocoeff.copy()
# Mulliken matrix
if iop == 0:
cts = c.T.dot(ova)
natom = len(partition)
pija = numpy.zeros((natom,n,n))
for iatom in range(natom):
idx = partition[iatom]
tmp = numpy.dot(cts[:,idx],c[idx,:])
pija[iatom] = 0.5*(tmp+tmp.T)
# Lowdin
elif iop == 1:
s12 = sqrtm(ova)
s12c = s12.T.dot(c)
natom = len(partition)
pija = numpy.zeros((natom,n,n))
for iatom in range(natom):
idx = partition[iatom]
pija[iatom] = numpy.dot(s12c[idx,:].T,s12c[idx,:])
# Boys
elif iop == 2:
rmat = mol.intor_symmetric('cint1e_r_sph',3)
pija = numpy.zeros((3,n,n))
for icart in range(3):
pija[icart] = reduce(numpy.dot,(c.T,rmat[icart],c))
# P[i,j,a]
pija = pija.transpose(1,2,0).copy()
return pija
## Initial from random unitary
#iden = numpy.identity(n)
#iden += 1.e-2*numpy.random.uniform(-1,1,size=n*n).reshape(n,n)
#u,r = scipy.linalg.qr(iden)
#mou = mocoeff.dot(u)
#pija = genPaij(mol,mou,ova,partition,iop)
u = numpy.identity(n)
pija = genPaij(mol,mocoeff,ova,partition,iop)
if debug: pija0 = pija.copy()
# Start
def funval(pija):
return numpy.einsum('iia,iia',pija,pija)
fun = funval(pija)
print ' initial funval = ',fun
#
# Iteration
#
for icycle in range(maxcycle):
delta = 0.0
# i>j
ijdx = []
for i in range(n-1):
for j in range(i+1,n):
bij = abs(numpy.sum(pija[i,j]*(pija[i,i]-pija[j,j])))
ijdx.append((i,j,bij))
# Without pivoting
# 6-31g: icycle= 184 delta= 5.6152945523e-09 fun= 54.4719738182
# avtz : icycle= 227 delta= 4.43639097128e-09 fun= 3907.60402435
# With pivoting - significantly accelerated!
# 6-31g: icycle= 71 delta= 7.3566217445e-09 fun= 54.4719739144
# avdz : icycle= 28 delta= 2.31739493914e-10 fun= 3907.60402153
# The delta value generally decay monotonically (adjoint diagonalization)
ijdx = sorted(ijdx,key=lambda x:x[2],reverse=True)
for i,j,bij in ijdx:
#
# determine angle
#
vij = pija[i,i]-pija[j,j]
aij = numpy.dot(pija[i,j],pija[i,j]) \
- 0.25*numpy.dot(vij,vij)
bij = numpy.dot(pija[i,j],vij)
if abs(aij)<1.e-10 and abs(bij)<1.e-10: continue
p1 = math.sqrt(aij**2+bij**2)
cos4a = -aij/p1
sin4a = bij/p1
cos2a = math.sqrt((1+cos4a)*0.5)
sin2a = math.sqrt((1-cos4a)*0.5)
cosa = math.sqrt((1+cos2a)*0.5)
sina = math.sqrt((1-cos2a)*0.5)
# Why? Because we require alpha in [0,pi/2]
if sin4a < 0.0:
cos2a = -cos2a
sina,cosa = cosa,sina
# stationary condition
if abs(cosa-1.0)<1.e-10: continue
if abs(sina-1.0)<1.e-10: continue
# incremental value
delta += p1*(1-cos4a)
#
# Transformation
#
if debug:
g = numpy.identity(n)
g[i,i] = cosa
g[j,j] = cosa
g[i,j] = -sina
g[j,i] = sina
ug = u.dot(g)
pijag = numpy.einsum('ik,jl,ija->kla',ug,ug,pija0)
# Urot
ui = u[:,i]*cosa+u[:,j]*sina
uj = -u[:,i]*sina+u[:,j]*cosa
u[:,i] = ui.copy()
u[:,j] = uj.copy()
# Bra-transformation of Integrals
tmp_ip = pija[i,:,:]*cosa+pija[j,:,:]*sina
tmp_jp = -pija[i,:,:]*sina+pija[j,:,:]*cosa
pija[i,:,:] = tmp_ip.copy()
pija[j,:,:] = tmp_jp.copy()
# Ket-transformation of Integrals
tmp_ip = pija[:,i,:]*cosa+pija[:,j,:]*sina
tmp_jp = -pija[:,i,:]*sina+pija[:,j,:]*cosa
pija[:,i,:] = tmp_ip.copy()
pija[:,j,:] = tmp_jp.copy()
if debug:
diff1 = numpy.linalg.norm(u-ug)
diff2 = numpy.linalg.norm(pija-pijag)
cu = numpy.dot(mocoeff,u)
pija2 = genPaij(cu,ova,partition)
fun2 = funval(pija2)
diff = abs(fun+delta-fun2)
print 'diff(u/p/f)=',diff1,diff2,diff
if diff1>1.e-6:
print 'Error: ug',diff1
exit()
if diff2>1.e-6:
print 'Error: pijag',diff2
exit()
if diff>1.e-6:
print 'Error: inconsistency in PMloc: fun/fun2=',fun+delta,fun2
exit()
fun = fun+delta
print 'icycle=',icycle,'delta=',delta,'fun=',fun
if delta<tol: break
# Check
ierr = 0
if delta<tol:
print 'CONG: PMloc converged!'
else:
ierr=1
print 'WARNING: PMloc not converged'
return ierr,u
def genAtomPartition(mol):
part = {}
for iatom in range(mol.natm):
part[iatom]=[]
ncgto = 0
for binfo in mol._bas:
atom_id = binfo[0]
lang = binfo[1]
ncntr = binfo[3]
nbas = ncntr*(2*lang+1)
part[atom_id]+=range(ncgto,ncgto+nbas)
ncgto += nbas
partition = []
for iatom in range(mol.natm):
partition.append(part[iatom])
return partition
#
# Molden Format
#
if __name__ == '__main__':
print 'PMlocal'
from pyscf import gto,scf
mol = gto.Mole()
mol.verbose = 5 #6
fac = 0.52917721092
mol.atom = [['N',map(lambda x:x*fac,[0.0, -1.653532, 0.0])],
               ['N',map(lambda x:x*fac,[0.0, 1.653532, 0.0])],
               ['O',map(lambda x:x*fac,[-2.050381, -2.530377, 0.0])],
['O',map(lambda x:x*fac,[2.050381, -2.530377, 0.0])],
['O',map(lambda x:x*fac,[-2.050381, 2.530377, 0.0])],
['O',map(lambda x:x*fac,[2.050381, 2.530377, 0.0])]]
mol.basis = 'aug-cc-pvdz'
mol.charge = 0
mol.spin = 0
mol.build()
mf = scf.RHF(mol)
mf.init_guess = 'atom'
mf.level_shift = 0.0
mf.max_cycle = 100
mf.conv_tol=1.e-20
ehf=mf.scf()
nocc = mol.nelectron/2
ierr,uo = loc(mol,mf.mo_coeff[:,:nocc],iop=0)
ierr,uv = loc(mol,mf.mo_coeff[:,nocc:],iop=0)
u = s
|
frnsys/broca
|
broca/knowledge/doc2vec.py
|
Python
|
mit
| 1,614
| 0.001239
|
from gensim.models.doc2vec import Doc2Vec, LabeledSentence
from nltk.tokenize import sent_tokenize, word_tokenize
from sup.progress import Progress
def train_doc2vec(paths, out='data/model.d2v', tokenizer=word_tokenize, sentences=False, **kwargs):
"""
Train a doc2vec model on a list of files.
"""
    # NOTE: dict.update() returns None, so build the defaults first and then
    # merge the caller's overrides into them.
    defaults = {
        'size': 400,
        'window': 8,
        'min_count': 2,
        'workers': 8
    }
    defaults.update(kwargs)
    kwargs = defaults
n = 0
for path in paths:
print('Counting lines for {0}...'.format(path))
n += sum(1 for line in open(path, 'r'))
print('Processing {0} lines...'.format(n))
print('Training doc2vec model...')
m = Doc2Vec(_doc2vec_doc_stream(paths, n, tokenizer=tokenizer, sentences=sentences), **kwargs)
print('Saving...')
m.save(out)
def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):
"""
    Generator to feed sentences to the doc2vec model.
"""
i = 0
p = Progress()
for path in paths:
        with open(path, 'r') as f:
for line in f:
i += 1
p.print_progress(i/n)
# We do minimal pre-processing here so the model can learn
# punctuation
line = line.lower()
if sentences:
for sent in sent_tokenize(line):
tokens = tokenizer(sent)
yield LabeledSentence(tokens, ['SENT_{}'.format(i)])
else:
tokens = tokenizer(line)
yield LabeledSentence(tokens, ['SENT_{}'.format(i)])
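if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: the corpus paths
    # and output location are assumptions for illustration; each path should
    # point at a plain-text file with one document per line.
    train_doc2vec(['corpus/a.txt', 'corpus/b.txt'],
                  out='data/model.d2v',
                  tokenizer=word_tokenize,
                  sentences=True)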
|
Pholey/vcfx
|
vcfx/field/explanatory/nodes.py
|
Python
|
mit
| 1,485
| 0.012795
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
from vcfx.field.nodes import Field
class Categories(Field):
KEY = "CATEGORIES"
def __init__(self, *a, **kw):
        super(Categories, self).__init__(*a, **kw)
def clean_values(self, v):
self.value = v.strip("\r\n").split(",")
class Note(Field):
KEY = "NOTE"
def __init__(self, *a, **kw):
super(Note, self).__init__(*a, **kw)
class Prodid(Field):
KEY = "PRODID"
def __init__(self, *a, **kw):
super(Prodid, self).__init__(*a, **kw)
class Revision(Field):
KEY = "REV"
def __init__(self, *a, **kw):
super(Revision, self).__init__(*a, **kw)
class Sound(Field):
KEY = "SOUND"
def __init__(self, *a, **kw):
super(Sound, self).__init__(*a, **kw)
class UID(Field):
KEY = "UID"
def __init__(self, *a, **kw):
super(UID, self).__init__(*a, **kw)
class ClientPIDMap(Field):
KEY = "CLIENTPIDMAP"
def __init__(self, *a, **kw):
super(ClientPIDMap, self).__init__(*a, **kw)
class Url(Field):
KEY = "URL"
SCALAR = True
def __init__(self, *a, **kw):
super(Url, self).__init__(*a, **kw)
class Version(Field):
KEY = "VERSION"
def __init__(self, *a, **kw):
super(Version, self).__init__(*a, **kw)
|
fouk666/xoxo
|
XOXO/app/__init__.py
|
Python
|
mit
| 258
| 0.004673
|
from flask import Flask
from flask_socketio import SocketIO
# create an instance of the Flask class
app = Flask(__name__)
# create an instance of the SocketIO class
socketio = SocketIO(app)
from app.views import home
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/auxilium/tools/setup_tools.py
|
Python
|
apache-2.0
| 3,576
| 0
|
# -*- coding: utf-8 -*-
# auxilium
# --------
# Python project for an automated test and deploy toolkit.
#
# Author: sonntagsgesicht
# Version: 0.1.8, copyright Saturday, 02 October 2021
# Website: https://github.com/sonntagsgesicht/auxilium
# License: Apache License 2.0 (see LICENSE file)
from datetime import date
from logging import log, INFO
from os import getcwd, sep, walk, makedirs
from os.path import exists, join, basename
from shutil import move
from zipfile import ZipFile
from .const import ICONS
from .system_tools import open
EXT = ' (created by auxilium)'
PKG_ZIP_FILE = \
__file__.replace(join(*__file__.split(sep)[-2:]), join('data', 'pkg.zip'))
def create_project(name=None, slogan=None, author=None, email=None, url=None,
pkg_zip_file=PKG_ZIP_FILE, path=getcwd()):
"""create a new project"""
if not any((name, slogan, author, email)):
log(INFO, '')
log(INFO, 'Please enter project details.')
log(INFO, '')
name = input('Enter project name : ') if name is None else name
    slogan = input('Enter project slogan: ') if slogan is None else slogan
slogan += EXT
author = input('Enter author name : ') if author is None else author
email = input('Enter project email : ') if email is None else email
url = input('Enter project url : ') if url is None else url
    url = url or 'https://github.com/<author>/<name>'
project_path = join(path, name)
pkg_path = join(path, name, name)
# setup project infrastructure
if exists(project_path):
msg = 'Project dir %s exists. ' \
'Cannot create new project.' % project_path
raise FileExistsError(msg)
if not exists(pkg_zip_file):
msg = 'Project template %s does not exists. ' \
'Cannot create new project.' % pkg_zip_file
raise FileNotFoundError(msg)
with ZipFile(pkg_zip_file, 'r') as zip_ref:
zip_ref.extractall(project_path)
makedirs(pkg_path)
move(project_path + sep + '__init__.py', pkg_path)
# setup source directory
def rp(pth):
f = open(pth, 'r')
c = f.read()
f.close()
c = c.replace('<url>', url)
c = c.replace('<email>', email)
c = c.replace('<author>', author)
c = c.replace('<doc>', slogan)
c = c.replace('<name>', name)
c = c.replace('<date>', date.today().strftime('%A, %d %B %Y'))
f = open(pth, 'w')
f.write(c)
f.close()
rp(pkg_path + sep + '__init__.py')
rp(project_path + sep + 'setup.py')
rp(project_path + sep + 'README.rst')
rp(project_path + sep + 'HOWTO.rst')
rp(project_path + sep + 'CHANGES.rst')
rp(project_path + sep + 'doc' + sep + 'sphinx' + sep + 'doc.rst')
rp(project_path + sep + 'doc' + sep + 'sphinx' + sep + 'conf.py')
log(INFO, '')
log(INFO, ICONS["create"] +
'created project %s with these files:' % name)
log(INFO, ' in %s' % path)
for subdir, dirs, files in walk(name):
log(INFO, '')
for file in files:
log(INFO, ' ' + join(subdir, file))
log(INFO, '')
return project_path
def create_finish(name=basename(getcwd())):
log(INFO, ICONS["finish"] + 'project setup finished')
log(INFO, '')
log(INFO, 'Consider a first full run via: ')
log(INFO, '')
log(INFO, ' > cd %s' % name)
log(INFO, ' > auxilium test')
log(INFO, ' > auxilium doc --api')
log(INFO, ' > auxilium build')
log(INFO, ' > auxilium doc --show')
log(INFO, '')
return 0
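if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: the argument
    # values are assumptions for illustration. create_project() unpacks the
    # bundled pkg.zip template into ./demo, and create_finish() logs the
    # suggested follow-up commands.
    create_project(name='demo', slogan='a demo project', author='Jane Doe',
                   email='jane@example.com', url='https://example.com/demo')
    create_finish('demo')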
|
numaru/injector
|
injector_cli.py
|
Python
|
gpl-3.0
| 318
| 0.003145
|
import sys
from injector import Injector
def main():
path_exe = str(sys.argv[1])
path_dll = str(sys.argv[2])
injector = Injector()
pid = injector.create_process(path_exe)
injector.load_from_pid(pid)
injector.inject_dll(path_dll)
injector.unload()
if __name__ == "__main__":
main()
|
jcnelson/syndicate
|
python/syndicate/observer/storage/common.py
|
Python
|
apache-2.0
| 2,548
| 0.02394
|
#!/usr/bin/python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import base64
from Crypto.PublicKey import RSA as CryptoKey
import logging
from logging import Logger
logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
logger = logging.getLogger()
logger.setLevel( logging.INFO )
import syndicate.syndicate as c_syndicate
#-------------------------------
def encrypt_slice_secret( observer_pkey_pem, slice_secret ):
"""
Encrypt and serialize the slice secret with the Observer private key
"""
# get the public key
try:
        observer_pubkey_pem = CryptoKey.importKey( observer_pkey_pem ).publickey().exportKey()
except Exception, e:
logger.exception(e)
logger.error("Failed to derive public key from private key")
return None
# encrypt the data
rc, sealed_slice_secret = c_syndicate.encrypt_data( observer_pkey_pem, observer_pubkey_pem, slice_secret )
if rc != 0:
logger.error("Failed to encrypt slice secret")
return None
    sealed_slice_secret_b64 = base64.b64encode( sealed_slice_secret )
return sealed_slice_secret_b64
#-------------------------------
def decrypt_slice_secret( observer_pkey_pem, sealed_slice_secret_b64 ):
"""
Unserialize and decrypt a slice secret
"""
# get the public key
try:
observer_pubkey_pem = CryptoKey.importKey( observer_pkey_pem ).publickey().exportKey()
except Exception, e:
logger.exception(e)
logger.error("Failed to derive public key from private key")
return None
sealed_slice_secret = base64.b64decode( sealed_slice_secret_b64 )
# decrypt it
rc, slice_secret = c_syndicate.decrypt_data( observer_pubkey_pem, observer_pkey_pem, sealed_slice_secret )
if rc != 0:
logger.error("Failed to decrypt '%s', rc = %d" % (sealed_slice_secret_b64, rc))
return None
return slice_secret
|
Eric-Gaudiello/tensorflow_dev
|
tensorflow_home/tensorflow_venv/lib/python3.4/site-packages/tensorflow/python/platform/default/_status_bar.py
|
Python
|
gpl-3.0
| 914
| 0.001094
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A no-op implementation of status bar functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def SetupStatusBarInsideGoogle(unused_link_text, unused_port):
pass
|
roncohen/apm-server
|
_beats/dev-tools/cmd/dashboards/export_5x_dashboards.py
|
Python
|
apache-2.0
| 3,663
| 0.001365
|
from elasticsearch import Elasticsearch
import argparse
import os
import json
import re
def ExportDashboards(es, regex, kibana_index, output_directory):
res = es.search(
index=kibana_index,
doc_type="dashboard",
size=1000)
try:
reg_exp = re.compile(regex, re.IGNORECASE)
except:
print("Wrong regex {}".format(regex))
return
for doc in res['hits']['hits']:
if not reg_exp.match(doc["_source"]["title"]):
print("Ignore dashboard", doc["_source"]["title"])
continue
# save dashboard
SaveJson("dashboard", doc, output_directory)
# save dependencies
panels = json.loads(doc['_source']['panelsJSON'])
for panel in panels:
if panel["type"] == "visualization":
ExportVisualization(
es,
panel["id"],
kibana_index,
output_directory)
elif panel["type"] == "search":
ExportSearch(
es,
panel["id"],
kibana_index,
output_directory)
else:
print("Unknown type {} in dashboard".format(panel["type"]))
def ExportVisualization(es, visualization, kibana_index, output_directory):
doc = es.get(
index=kibana_index,
doc_type="visualization",
id=visualization)
# save visualization
SaveJson("visualizatio
|
n", doc, output_directory)
# save dependencies
if "savedSearchId" in doc["_source"]:
search = doc["_source"]['savedSearchId']
ExportSearch(
es,
search,
kibana_index,
output_directory)
def ExportSearch(es, search, kibana_index, output_directory):
doc = es.get(
index=kibana_index,
doc_type="search",
id=search)
# save search
SaveJson("search", doc, output_directory)
def SaveJson(doc_type, doc, output_directory):
dir = os.path.join(output_directory, doc_type)
if not os.path.exists(dir):
os.makedirs(dir)
# replace unsupported characters
filepath = os.path.join(dir, re.sub(r'[\>\<:"/\\\|\?\*]', '', doc['_id']) + '.json')
with open(filepath, 'w') as f:
json.dump(doc['_source'], f, indent=2)
print("Written {}".format(filepath))
def main():
parser = argparse.ArgumentParser(
description="Export the Kibana dashboards together with"
" all used visualizations, searches and index pattern")
parser.add_argument("--url",
help="Elasticsearch URL. By default: http://localhost:9200",
default="http://localhost:9200")
parser.add_argument("--regex",
help="Regular expression to match all the dashboards to be exported. For example: metricbeat*",
required=True)
parser.add_argument("--kibana",
help="Elasticsearch index where to store the Kibana settings. By default: .kibana ",
default=".kibana")
parser.add_argument("--dir", help="Output directory. By default: output",
default="output")
args = parser.parse_args()
print("Export {} dashboards to {} directory".format(args.regex, args.dir))
print("Elasticsearch URL: {}".format(args.url))
print("Elasticsearch index to store Kibana's"
" dashboards: {}".format(args.kibana))
es = Elasticsearch(args.url)
ExportDashboards(es, args.regex, args.kibana, args.dir)
if __name__ == "__main__":
main()
|
nficano/jotonce.com
|
jotonce/api/passphrases.py
|
Python
|
mit
| 285
| 0
|
# -*- coding: utf-8 -*-
from flask import Blueprint
from jotonce.api import route
from jotonce.passphrases.models import Passphrase
bp = Blueprint('passphrase', __name__, url_prefix='/passphrase')
@route(bp, '/')
def get():
p = Passphrase.get_random()
return {"results": p}
|
adobe-mds/dcos-cassandra-service
|
cassandra-test-client/launcher.py
|
Python
|
apache-2.0
| 8,836
| 0.007583
|
#!/usr/bin/python
'''Launches cassandra-stress instances in Marathon.'''
import json
import logging
import pprint
import random
import string
import sys
import urllib
# non-stdlib libs:
try:
import click
import requests
from requests.exceptions import HTTPError
except ImportError:
print("Failed to load third-party libraries.")
    print("Please run: $ pip install -r requirements.txt")
sys.exit(1)
def __urljoin(*elements):
return "/".join(elem.strip("/") for elem in elements)
def __post(url, headers={}, json=None):
pprint.pprint(json)
response = requests.post(url, json=json, headers=headers)
return __handle_response("POST", url, response)
def __handle_response(httpcmd, url, response):
# http code 200-299 => success!
if response.status_code < 200 or response.status_code >= 300:
errmsg = "Error code in response to %s %s: %s/%s" % (
httpcmd, url, response.status_code, response.content)
print(errmsg)
raise HTTPError(errmsg)
json = response.json()
print("Got response for %s %s:\n%s" % (httpcmd, url, json))
return json
def marathon_apps_url(cluster_uri):
url = __urljoin(cluster_uri, "marathon", "v2", "apps")
print("Marathon query: %s" % url)
return url
def marathon_launch_app(marathon_url, app_id, cmd, instances=1, packages=[], env={}, headers={}):
formatted_packages = []
for package in packages:
formatted_packages.append({"uri": package})
formatted_env = {}
for k,v in env.items():
formatted_env[str(k)] = str(v)
post_json = {
"id": app_id,
"container": {
"type": "MESOS",
},
"cmd": cmd,
"cpus": 1,
"mem": 512.0, # 512m apparently required: 128m and 256m results in FAILEDs.
"disk": 1,
"instances": instances,
"fetch": formatted_packages,
"env": formatted_env,
}
json = __post(marathon_url, headers=headers, json=post_json)
return json["deployments"]
def get_random_id(length=8):
return ''.join([random.choice(string.ascii_lowercase) for _ in range(length)])
JRE_JAVA_PATH = "jre/bin/java"
CASSANDRA_STRESS_PATH = "apache-cassandra-*/tools/bin/cassandra-stress"
DEFAULT_PORT=9042
@click.command()
@click.argument('cluster_url', envvar='DCOS_URI')
@click.option("--framework-name", show_default=True, default='cassandra',
help="framework's name in DCOS, for auto-detecting nodes")
@click.option("--writer-count", show_default=True, default=5,
help="number of writers to launch")
@click.option("--reader-count", show_default=True, default=5,
help="number of readers to launch")
@click.option("--thread-count", show_default=True, default=5,
help="number of threads to launch in each writer and reader")
@click.option("--duration", show_default=True, default='1h',
help="amount of time for readers and writers to run before exiting")
@click.option("--consistency", show_default=True, default='one',
help="consistency level to request for writers")
@click.option("--truncate", show_default=True, default='never',
help="whether to truncate writes")
@click.option("--username", envvar="DCOS_USERNAME",
help="username to use when making requests to the DCOS cluster (if the cluster requires auth)")
@click.option("--password", envvar="DCOS_PASSWORD",
help="password to use when making requests to the DCOS cluster (if the cluster requires auth)")
@click.option("--pkg-url", show_default=True, default="https://s3-us-west-2.amazonaws.com/cassandra-framework-dev/testing/apache-cassandra-2.2.5-bin.tar.gz",
help="url of the cassandra package")
@click.option("--jre-url", show_default=True, default="https://downloads.mesosphere.com/java/jre-8u121-linux-x64.tar.gz",
help="url of the jre package")
@click.option("--keyspace-override", default="",
help="keyspace to use instead of a randomized default")
@click.option("--ip-override", default=[],
help="list of node ips to use instead of what the framework returns")
@click.option("--port-override", show_default=True, default=DEFAULT_PORT,
help="node port to use. cassandra-stress lacks support for multiple ports.")
def main(
cluster_url,
framework_name,
writer_count,
reader_count,
thread_count,
duration,
consistency,
truncate,
username,
password,
pkg_url,
jre_url,
keyspace_override,
ip_override,
port_override):
"""Launches zero or more test writer and reader clients against a Cassandra framework.
The clients are launched as marathon tasks, which may be destroyed using the provided curl commands when testing is complete.
You must at least provide the URL of the cluster, for example: 'python launcher.py http://your-dcos-cluster.com'"""
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger('requests.packages.urllib3')
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
keyspace_rand_id = get_random_id() # reused for keyspace, unless --keyspace-override is specified
writer_app_id = 'cassandratest-' + keyspace_rand_id + '-writer'
reader_app_id = 'cassandratest-' + keyspace_rand_id + '-reader'
headers = {}
if username and password:
post_json = {
"uid": username,
"password": password
}
tok_response = __post(__urljoin(cluster_url, "acs/api/v1/auth/login"), json=post_json)
headers = {"Authorization": "token={}".format(tok_response["token"])}
if not ip_override:
# args didn't specify manual ips, so fetch the list with a framework RPC:
fetch_ips_path = '{}/service/{}/v1/nodes/connect/native'.format(cluster_url.rstrip("/"), framework_name)
json = __handle_response('GET', fetch_ips_path, requests.get(fetch_ips_path, headers=headers))
# strip ":port" from returned endpoints. the ":" confuses cassandra-stress, it parses them as IPv6 IPs.
ips = []
for endpoint in json:
ip, port = endpoint.split(':')
# if port arg is default, try using a port value returned by the framework
if not port_override == DEFAULT_PORT:
port_override = port
ips.append(ip)
ip_override = ','.join(ips)
print('Using node IPs: {}, Port: {}'.format(ip_override, port_override))
if not keyspace_override:
# user didn't manually specify keyspace, generate random one (matches marathon job names)
# 'can only contain alphanumeric and underscore characters'
keyspace_override = 'test_' + keyspace_rand_id
# Note that non-dashed args (eg "duration"/"truncate") must go first, followed by dashed args (eg "-node"/"-rate"):
# See the available args here: https://docs.datastax.com/en/cassandra/2.1/cassandra/tools/toolsCStress_t.html
common_args = 'duration={} cl={} truncate={} -node {} -mode native cql3 port={} -schema keyspace={} -rate threads={}'.format(
duration, consistency, truncate, ip_override, port_override, keyspace_override, thread_count)
reader_args = "counter_read {}".format(common_args)
writer_args = "counter_write {}".format(common_args)
marathon_url = marathon_apps_url(cluster_url)
if not marathon_launch_app(
marathon_url = marathon_url,
app_id = reader_app_id,
cmd = "export JAVA_HOME=${MESOS_SANDBOX}/jre && env && ${MESOS_SANDBOX}/%s %s" % (
CASSANDRA_STRESS_PATH, reader_args),
instances = reader_count,
packages = [jre_url, pkg_url],
headers = headers):
print("Starting readers failed, skipping launch of writers")
return 1
if not marathon_launch_app(
marathon_url = marathon_url,
app_id = writer_app_id,
cmd = "export JAVA_HOME=${MESOS_SANDBOX}/jre && env && ${MESOS_SANDBOX}/%s %s" % (
CASSANDRA_STRE
|
smtheard/ShowTracker
|
controllers/show_follow.py
|
Python
|
gpl-3.0
| 1,287
| 0.002331
|
import bottle
from config import app, Session
from models.show_follow import ShowFollow
@app.get("/rest/show-follow/<show_id>")
def get_show_follow(session, show_id):
user_id = session.get("user_id")
if user_id:
sa_session = Session()
show_follow = sa_session.query(ShowFollow) \
.filter(ShowFollow.show_id == show_id, ShowFollow.user_id == user_id) \
.first()
return dict(following=(show_follow is not None), success=True)
return dict(following=False, success=True)
@app.post("/rest/show-follow/<show_id>")
def update_show_follow(session, show_id):
following = bottle.request.json.get("following")
user_id = session.get("user_id")
if user_id:
sa_session = Session()
if following:
sa_session.query(ShowFollow) \
.filter(ShowFollow.show_id == show_id, ShowFollow.user_id == user_id) \
.delete()
sa_session.commit()
            return dict(following=False, success=True)
show_follow = ShowFollow(show_id=show_id, user_id=user_id)
sa_session.add(show_follow)
sa_session.commit()
return dict(following=True, success=True)
return dict(redirect="/register")
|
rueckstiess/mtools
|
mtools/util/pattern.py
|
Python
|
apache-2.0
| 7,303
| 0.011368
|
#!/usr/bin/env python3
import json
import re
import sys
def _decode_pattern_list(data):
rv = []
contains_dict = False
for item in data:
if isinstance(item, list):
item = _decode_pattern_list(item)
elif isinstance(item, dict):
item = _decode_pattern_dict(item)
contains_dict = True
rv.append(item)
# avoid sorting if any element in the list is a dict
if not contains_dict:
rv = sorted(rv)
return rv
def _decode_pattern_dict(data):
rv = {}
for key, value in data.items():
if isinstance(key, bytes):
key = key.encode('utf-8')
if isinstance(key, str):
if key in ['$in', '$gt', '$gte', '$lt', '$lte', '$exists']:
return 1
if key == '$nin':
value = 1
if key in ['query', '$query']:
try:
# Try to decode value as a dictionary; this will fail if
# there happens to be a field called "query"
return _decode_pattern_dict(value)
except:
return value
if isinstance(value, list):
value = _decode_pattern_list(value)
elif isinstance(value, dict):
value = _decode_pattern_dict(value)
else:
value = 1
rv[key] = value
return rv
def shell2json(s):
"""Convert shell syntax to json."""
replace = {
r'BinData\(.+?\)': '1',
r'(new )?Date\(.+?\)': '1',
r'Timestamp\(.+?\)': '1',
r'ObjectId\(.+?\)': '1',
r'DBRef\(.+?\)': '1',
r'undefined': '1',
r'MinKey': '1',
r'MaxKey': '1',
r'NumberLong\(.+?\)': '1',
r'/.+?/\w*': '1'
}
for key, value in replace.items():
s = re.sub(key, value, s)
return s
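# Hedged example, not part of the original module: shell-specific tokens such
# as ObjectId(...) are collapsed to 1, leaving a JSON-like string, e.g.
#   shell2json("{ _id: ObjectId('528556616dde23324f233168') }") -> "{ _id: 1 }"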
def json2pattern(s, debug = False):
"""
Convert JSON format to a query pattern.
    Also handles mongo shell notation with unquoted key names.
    Pass debug=True to print additional info for each step of the processing chain.
"""
saved_s = s
if debug : print ("\n=======================\n", saved_s, file=sys.stderr)
# make valid JSON by wrapping field names in quotes
s, _ = re.subn(r'([{,])\s*([^,{\s\'"]+)\s*:', ' \\1 "\\2" : ', s)
if debug : print (s, file=sys.stderr)
# handle shell values that are not valid JSON
s = shell2json(s)
if debug : print (s, file=sys.stderr)
# convert to 1 where possible, to get rid of things like new Date(...)
s, _ = re.subn(r'([:,\[])\s*([^{}\[\]"]+?)\s*([,}\]])', '\\1 1 \\3', s)
if debug : print (s, file=sys.stderr)
# replace list values by 1. Not the '$in/$nin' lists, but the like of: {..., "attrib" : ["val1", "val2", "3"],...}
# updated regex, using positive lookahead and lookbehind to check for a " (quote)
    # right after '[' and before ']' to correctly handle cases where a ']' is part of the value and
# also cases where list values are url's "nnn://aaa.bbb" will correctly be simplified to '1'
s, _ = re.subn(r'("\S+"\s*:\s*\[\s*(?=\"))(.+)((?<=\")\s*\]\s*[,}])', '\\1 1 \\3', s)
if debug : print (s, file=sys.stderr)
    # now convert to dictionary, converting unicode to ascii
doc = {}
try:
doc = json.loads(s, object_hook=_decode_pattern_dict)
except Exception as err:
if debug:
## print some context info and return without any extracted query data..
msg = f'''json2pattern():json.loads Exception:\n Error: {err} : {sys.exc_info()[0]}\n saved_s: ({saved_s})\n s: ({s})\n'''
print(msg, file=sys.stderr)
return None
except:
print (f'''json2pattern():json.loads Unexpected error: saved_s: ({saved_s}) sys.exc_info():{sys.exc_info()[0]}''' )
raise
try:
return json.dumps(doc, sort_keys=True, separators=(', ', ': '), ensure_ascii=False)
except Exception as err:
## print some context info and return without any extracted query data..
if debug:
msg = f'''json2pattern():json.dumps Exception:\n Error: {sys.exc_info()[0]} : {err}\n saved_s: ({saved_s})\n doc: ({doc})\n'''
print(msg, file=sys.stderr)
return None
except:
print(f'''json2pattern():json.dumps Unexpected error: saved_s: ({saved_s}) sys.exc_info():{sys.exc_info()[0]}''')
raise
if __name__ == '__main__':
# define as True to get debug output of regex processing printed to stderr
debug = False
tests = {
'{d: {$gt: 2, $lt: 4}, b: {$gte: 3}, c: {$nin: [1, "foo", "bar"]}, "$or": [{a:"1uno"}, {b:"1uno"}] }' : '{"$or": [{"a": 1}, {"b": 1}], "b": 1, "c": {"$nin": 1}, "d": 1}',
'{a: {$gt: 2, $lt: 4}, "b": {$nin: [1, 2, 3]}, "$or": [{a:1}, {b:1}] }' : '{"$or": [{"a": 1}, {"b": 1}], "a": 1, "b": {"$nin": 1}}',
"{a: {$gt: 2, $lt: 4}, b: {$in: [ ObjectId('1234564863acd10e5cbf5f6e'), ObjectId('1234564863acd10e5cbf5f7e') ] } }" : '{"a": 1, "b": 1}',
"{ sk: -1182239108, _id: { $in: [ ObjectId('1234564863acd10e5cbf5f6e'), ObjectId('1234564863acd10e5cbf5f7e') ] } }" : '{"_id": 1, "sk": 1}',
'{ a: 1, b: { c: 2, d: "text" }, e: "more test" }' : '{"a": 1, "b": {"c": 1, "d": 1}, "e": 1}',
'{ _id: ObjectId(\'528556616dde23324f233168\'), config: { _id: 2, host: "localhost:27017" }, ns: "local.oplog.rs" }' : '{"_id": 1, "config": {"_id": 1, "host": 1}, "ns": 1}',
# 20191231 - bugre - issue#764 - adding some more test cases.. based on our mongodb logs (mongod 4.0.3)
r'{_id: ObjectId(\'528556616dde23324f233168\'), curList: [ "€", "XYZ", "Krown"], allowedSnacks: 1000 }' : '{"_id": 1, "allowedSnacks": 1, "curList": [1]}',
r'{_id: "test", curList: [ "1onum]pas", "ab\]c" ] }' : '{"_id": 1, "curList": [1]}',
r'{ $and: [ { mode: ObjectId(\'5aafd085edb85e0dc09dd985\') }, { _id: { $ne: ObjectId(\'5e015519877718752d63dd9c\') } }, '
'{ snack: { $in: [ "BLA", "RUN", "BLE" ] } }, { $or: [ { $and: [ { kind: "Solar" }, { wind: true }, '
'{ beginTime: { $gte: new Date(1577134729858) } } ] }, { $and: [ { kind: "event" }, { endTime: { $gte: new Date(1577739529858) } } ] } ] } ] }' :
'{"$and": [{"mode": 1}, {"_id": {"$ne": 1}}, {"snack": 1}, {"$or": [{"$and": [{"kind": 1}, {"wind": 1}, {"beginTime": 1}]}, {"$and": [{"kind": 1}, {"endTime": 1}]}]}]}',
# @niccottrell use case and 2nd one extrapolating the 1st one.
r'{ urls: { $all: [ "https://surtronic.info/" ] } }' : '{"urls": {"$all": [1]}}',
r'{ urls: { $all: [ "https://surtronic.info/", "http://url2.com" ] } }' : '{"urls": {"$all": [1]}}'
}
for k,v in tests.items():
r = json2pattern(k, debug)
if ( r == v ):
if debug :
print(f'''OK...: {k}\n Expect: {v}\n Output: {r}\n\n''')
else:
print(f'''OK: {k}''')
else:
print(f'''\nERROR **: {k}\n Expect: {v}\n Output: {r}\n\n''')
|
fproldan/pyday2016
|
pyday2016/wsgi.py
|
Python
|
gpl-3.0
| 396
| 0
|
"""
WSGI config for pyday2016 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pyday2016.settings")
application = get_wsgi_application()
|
rainerraul/pi-python
|
rfr101a.py
|
Python
|
mit
| 191
| 0.036649
|
#!/usr/bin/python3
import serial
import time
serialRFID = serial.Serial("/dev/ttyAMA0", 9600)
while True:
print (serialRFID.read(12)[1:11])
serialRFID.flushInput()
time.sleep(0.2)
| |
erikr/howtostoreiosdata
|
howtostoreiosdata/urls.py
|
Python
|
mit
| 355
| 0.008451
|
from django.conf import settings
from django.conf.urls import patterns, include, url
urlpatterns = patterns(
    '',
url(r'^', include('howtostoreiosdata.wizard.urls', namespace="wizard")),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
solashirai/edx-platform
|
scripts/safe_template_linter.py
|
Python
|
agpl-3.0
| 30,181
| 0.001325
|
#!/usr/bin/env python
"""
A linting tool to check if templates are safe
"""
from enum import Enum
import os
import re
import sys
_skip_dirs = (
'/node_modules',
'/vendor',
'/spec',
'/.pycharm_helpers',
'/test_root',
'/reports/diff_quality',
'/common/static/xmodule/modules',
)
_skip_mako_dirs = _skip_dirs
_skip_underscore_dirs = _skip_dirs + ('/test',)
def _is_skip_dir(skip_dirs, directory):
"""
Determines whether a directory should be skipped or linted.
Arguments:
skip_dirs: The configured directories to be skipped.
directory: The current directory to be tested.
Returns:
True if the directory should be skipped, and False otherwise.
"""
for skip_dir in skip_dirs:
dir_contains_skip_dir = (directory.find(skip_dir + '/') >= 0)
if dir_contains_skip_dir or directory.endswith(skip_dir):
return True
return False
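# Hedged illustration, not part of the original linter: '/vendor' is one of
# the configured skip directories above, so anything beneath it is skipped
# while other paths are linted. The sample paths are assumptions.
#   _is_skip_dir(_skip_dirs, 'lms/static/vendor/js')  -> True
#   _is_skip_dir(_skip_dirs, 'lms/templates')         -> False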
def _load_file(self, file_full_path):
"""
Loads a file into a string.
Arguments:
file_full_path: The full path of the file to be loaded.
Returns:
A string containing the files contents.
"""
with open(file_full_path, 'r') as input_file:
file_contents = input_file.read()
return file_contents.decode(encoding='utf-8')
def _get_line_breaks(self, string):
"""
Creates a list, where each entry represents the index into the string where
the next line break was found.
Arguments:
string: The string in which to find line breaks.
Returns:
A list of indices into the string at which each line break can be
found.
"""
line_breaks = [0]
index = 0
while True:
index = string.find('\n', index)
if index < 0:
break
index += 1
line_breaks.append(index)
return line_breaks
def _get_line_number(self, line_breaks, index):
"""
Given the list of line break indices, and an index, determines the line of
the index.
Arguments:
line_breaks: A list of indices into a string at which each line break
was found.
index: The index into the original string for which we want to know the
line number
Returns:
The line number of the provided index.
"""
current_line_number = 0
for line_break_index in line_breaks:
if line_break_index <= index:
current_line_number += 1
else:
break
return current_line_number
def _get_line(self, string, line_breaks, line_number):
"""
Gets the line of text designated by the provided line number.
Arguments:
string: The string of content with line breaks.
line_breaks: A list of indices into a string at which each line break
was found.
line_number: The line number of the line we want to find.
Returns:
The line of text designated by the provided line number.
"""
start_index = line_breaks[line_number - 1]
if len(line_breaks) == line_number:
line = string[start_index:]
else:
end_index = line_breaks[line_number]
line = string[start_index:end_index - 1]
return line.encode(encoding='utf-8')
def _get_column_number(self, line_breaks, line_number, index):
"""
Gets the column (i.e. index into the line) for the given index into the
original string.
Arguments:
line_breaks: A list of indices into a string at which each line break
was found.
line_number: The line number of the line we want to find.
index: The index into the original string.
Returns:
The column (i.e. index into the line) for the given index into the
original string.
"""
start_index = line_breaks[line_number - 1]
column = index - start_index + 1
return column
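def _demo_position_helpers():
    """
    Hedged illustration, not part of the original linter: shows how the
    helpers above map a character index to a (line, column) pair. The sample
    string is an assumption; the leading None stands in for the unused
    `self`-style first argument.
    """
    text = "first\nsecond\nthird\n"
    breaks = _get_line_breaks(None, text)
    index = text.find('third')
    line = _get_line_number(None, breaks, index)
    column = _get_column_number(None, breaks, line, index)
    return line, column  # expected: (3, 1)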
class Rules(Enum):
"""
An Enum of each rule which the linter will check.
"""
mako_missing_default = ('mako-missing-default', 'Missing default <%page expression_filter="h"/>.')
mako_multiple_page_tags = ('mako-multiple-page-tags', 'A Mako template can only have one <%page> tag.')
mako_unparsable_expression = ('mako-unparsable-expression', 'The expression could not be properly parsed.')
mako_unwanted_html_filter = ('mako-unwanted-html-filter', 'Remove explicit h filters when it is provided by the page directive.')
mako_invalid_html_filter = ('mako-invalid-html-filter', 'The expression is using an invalid filter in an HTML context.')
mako_invalid_js_filter = ('mako-invalid-js-filter', 'The expression is using an invalid filter in a JavaScript context.')
mako_js_string_missing_quotes = ('mako-js-string-missing-quotes', 'An expression using the js_escape_string filter must have surrounding quotes.')
underscore_not_escaped = ('underscore-not-escaped', 'Expressions should be escaped using <%- expression %>.')
def __init__(self, rule_id, rule_summary):
self.rule_id = rule_id
self.rule_summary = rule_summary
class RuleViolation(object):
"""
Base class representing a rule violation which can be used for reporting.
"""
def __init__(self, rule):
"""
Init method.
Arguments:
rule: The Rule which was violated.
"""
self.rule = rule
self.full_path = ''
def prepare_results(self, full_path, file_string, line_breaks):
"""
Preps this instance for results reporting.
Arguments:
full_path: Path of the file in violation.
file_string: The contents of the file in violation.
line_breaks: A list of indices into file_string at which each line
break was found.
"""
self.full_path = full_path
def print_results(self):
"""
Prints the results represented by this rule violation.
"""
print "{}: {}".format(self.full_path, self.rule.rule_id)
class ExpressionRuleViolation(RuleViolation):
"""
A class representing a particular rule violation for expressions which
contain more specific details of the location of the violation for reporting
purposes.
"""
def __init__(self, rule, expression):
"""
Init method.
Arguments:
rule: The Rule which was violated.
expression: The expression that was in violation.
"""
super(ExpressionRuleViolation, self).__init__(rule)
self.expression = expression
self.start_line = 0
self.start_column = 0
self.end_line = 0
self.end_column = 0
self.lines = []
def prepare_results(self, full_path, file_string, line_breaks):
"""
Preps this instance for results reporting.
Arguments:
full_path: Path of the file in violation.
file_string: The contents of the file in violation.
line_breaks: A list of indices into file_string at which each line
break was found.
"""
self.full_path = full_path
start_index = self.expression['start_index']
self.start_line = _get_line_number(self, line_breaks, start_index)
self.start_column = _get_column_number(self, line_breaks, self.start_line, start_index)
end_index = self.expression['end_index']
if end_index > 0:
self.end_line = _get_line_number(self, line_breaks, end_index)
self.end_column = _get_column_number(self, line_breaks, self.end_line, end_index)
else:
self.end_line = self.start_line
self.end_column = '?'
for line_number in range(self.start_line, self.end_line + 1):
self.lines.append(_get_line(self, file_string, line_breaks, line_number))
def print_results(self):
"""
Prints the results represented by this rule violation.
"""
for line_number in range(self.start_line, self.end_line + 1):
if (line_number == self.start_line):
column = self.start_column
rule_id = self.rule.rule_id + ":"
else:
|
paris-saclay-cds/ramp-workflow
|
rampwf/score_types/tests/test_soft_accuracy.py
|
Python
|
bsd-3-clause
| 3,379
| 0
|
import numpy as np
from rampwf.score_types.soft_accuracy import SoftAccuracy
score_matrix_1 = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
])
score_matrix_2 = np.array([
[1, 0.5, 0],
[0.3, 1, 0.3],
[0, 0.5, 1],
])
y_true_proba_1 = np.array([1, 0, 0])
y_true_proba_2 = np.array([0, 1, 0])
y_true_proba_3 = np.array([0.5, 0.5, 0])
y_proba_1 = np.array([1, 0, 0])
y_proba_2 = np.array([0, 1, 0])
y_proba_3 = np.array([0, 0.1, 0])
y_proba_4 = np.array([-1, 0.1, -2])
y_proba_5 = np.array([0, 0, 0])
y_proba_6 = np.array([0.5, -1, 3])
def test_soft_accuracy():
score_1 = SoftAccuracy(score_matrix=score_matrix_1)
assert score_1(np.array([y_true_proba_1]), np.array([y_proba_1])) == 1
assert score_1(np.array([y_true_proba_1]), np.array([y_proba_2])) == 0
assert score_1(np.array([y_true_proba_1]), np.array([y_proba_3])) == 0
assert score_1(np.array([y_true_proba_1]), np.array([y_proba_4])) == 0
assert score_1(np.array([y_true_proba_2]), np.array([y_proba_1])) == 0
assert score_1(np.array([y_true_proba_2]), np.array([y_proba_2])) == 1
assert score_1(np.array([y_true_proba_2]), np.array([y_proba_3])) == 1
assert score_1(np.array([y_true_proba_2]), np.array([y_proba_4])) == 1
assert score_1(np.array([y_true_proba_3]), np.array([y_proba_1])) == 0.5
assert score_1(np.array([y_true_proba_3]), np.array([y_proba_2])) == 0.5
assert score_1(np.array([y_true_proba_1]), np.array([y_proba_5])) == 0.0
assert score_1(np.array([y_true_proba_2]), np.array([y_proba_5])) == 0.0
assert score_1(np.array([y_true_proba_3]), np.array([y_proba_5])) == 0
assert score_1(np.array([y_true_proba_1]), np.array([y_proba_6])) == 1 / 3
assert score_1(np.array([y_true_proba_2]), np.array([y_proba_6])) == 0.0
assert score_1(np.array([y_true_proba_3]), np.array([y_proba_6])) == 1 / 6
assert score_1(np.array([y_true_proba_3]), np.array([y_proba_3])) == 0.5
assert score_1(np.array([y_true_proba_3]), np.array([y_proba_4])) == 0.5
score_2 = SoftAccuracy(score_matrix=score_matrix_2)
assert score_2(np.array([y_true_proba_1]), np.array([y_proba_1])) == 1
assert score_2(np.array([y_true_proba_1]), np.array([y_proba_2])) == 0.5
assert score_2(np.array([y_true_proba_1]), np.array([y_proba_3])) == 0.5
assert score_2(np.array([y_true_proba_1]), np.array([y_proba_4])) == 0.5
assert score_2(np.array([y_true_proba_2]), np.array([y_proba_1])) == 0.3
assert score_2(np.array([y_true_proba_2]), np.array([y_proba_2])) == 1
assert score_2(np.array([y_true_proba_2]), np.array([y_proba_3])) == 1
assert score_2(np.array([y_true_proba_2]), np.array([y_proba_4])) == 1
assert score_2(np.array([y_true_proba_3]), np.array([y_proba_1])) == 0.65
assert score_2(np.array([y_true_proba_3]), np.array([y_proba_2])) == 0.75
assert score_2(np.array([y_true_proba_1]), np.array([y_proba_5])) == 0.0
assert score_2(np.array([y_true_proba_2]), np.array([y_proba_5])) == 0.0
assert score_2(np.array([y_true_proba_3]), np.array([y_proba_5])) == 0.0
assert score_2(np.array([y_true_proba_1]), np.array([y_proba_6])) == 1 / 3
assert score_2(np.array([y_true_proba_2]), np.array([y_proba_6])) == 0.3
assert score_2(np.array([y_true_proba_3]), np.array([y_proba_3])) == 0.75
assert score_2(np.array([y_true_proba_3]), np.array([y_proba_4])) == 0.75
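A hedged reconstruction of the scoring rule that these expected values imply (not necessarily the library's exact implementation): clip the predicted probabilities to [0, 1], renormalize them to sum to 1 (scoring 0 when everything is clipped away), and weight them by the row mixture y_true_proba @ score_matrix:

import numpy as np

def soft_accuracy_sketch(y_true_proba, y_proba, score_matrix):
    # Per-class weights induced by the (possibly soft) true label.
    weights = y_true_proba @ score_matrix
    clipped = np.clip(y_proba, 0, 1)
    total = clipped.sum()
    if total == 0:
        return 0.0
    return float(weights @ (clipped / total))

# Reproduces a few of the asserted values above.
M1 = np.eye(3)
print(soft_accuracy_sketch(np.array([1, 0, 0]), np.array([0.5, -1, 3]), M1))      # 1/3
print(soft_accuracy_sketch(np.array([0.5, 0.5, 0]), np.array([0.5, -1, 3]), M1))  # 1/6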
|
mja054/swift_plugin
|
swift/obj/server.py
|
Python
|
apache-2.0
| 39,651
| 0.001841
|
# Copyright (c) 2010-2011 OpenStack, LLC.
# Copyright (c) 2008-2011 Gluster, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Object Server for Swift """
from __future__ import with_statement
import cPickle as pickle
import errno
import os
import time
import traceback
from datetime import datetime
from hashlib import md5
from tempfile import mkstemp
from urllib import unquote
from contextlib import contextmanager
from ConfigParser import ConfigParser
from webob import Request, Response, UTC
from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPNotModified, HTTPPreconditionFailed, \
HTTPRequestTimeout, HTTPUnprocessableEntity, HTTPMethodNotAllowed
from xattr import getxattr, setxattr
from eventlet import sleep, Timeout, tpool
from swift.common.utils import mkdirs, normalize_timestamp, \
storage_directory, hash_path, renamer, fallocate, \
split_path, drop_buffer_cache, get_logger, write_pickle, \
plugin_enabled
from swift.common.bufferedhttp import http_connect
if plugin_enabled():
from swift.plugins.constraints import check_object_creation
from swift.plugins.utils import X_TYPE, X_OBJECT_TYPE, FILE, DIR, MARKER_DIR, \
OBJECT, DIR_TYPE, FILE_TYPE
else:
from swift.common.constraints import check_object_creation
from swift.common.constraints import check_mount, check_float, check_utf8
from swift.common.exceptions import ConnectionTimeout, DiskFileError, \
DiskFileNotExist
from swift.obj.replicator import tpooled_get_hashes, invalidate_hash, \
quarantine_renamer
DATADIR = 'objects'
ASYNCDIR = 'async_pending'
PICKLE_PROTOCOL = 2
METADATA_KEY = 'user.swift.metadata'
MAX_OBJECT_NAME_LENGTH = 1024
KEEP_CACHE_SIZE = (5 * 1024 * 1024)
# keep these lower-case
DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split())
def read_metadata(fd):
"""
Helper function to read the pickled metadata from an object file.
:param fd: file descriptor to load the metadata from
:returns: dictionary of metadata
"""
metadata = ''
key = 0
try:
while True:
metadata += getxattr(fd, '%s%s' % (METADATA_KEY, (key or '')))
key += 1
except IOError:
pass
return pickle.loads(metadata)
def write_metadata(fd, metadata):
"""
Helper function to write pickled metadata for an object file.
:param fd: file descriptor to write the metadata
:param metadata: metadata to write
"""
metastr = pickle.dumps(metadata, PICKLE_PROTOCOL)
key = 0
while metastr:
setxattr(fd, '%s%s' % (METADATA_KEY, key or ''), metastr[:254])
metastr = metastr[254:]
key += 1
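A small sketch of the chunking scheme used by write_metadata/read_metadata above, with an in-memory dict standing in for the real xattr calls: the pickled metadata is cut into 254-byte pieces stored under user.swift.metadata, user.swift.metadata1, user.swift.metadata2, ... and reassembled until a key is missing.

try:
    import cPickle as pickle
except ImportError:
    import pickle

METADATA_KEY = 'user.swift.metadata'
fake_xattrs = {}  # stands in for the file's extended attributes

def write_metadata_sketch(metadata):
    metastr = pickle.dumps(metadata, 2)
    key = 0
    while metastr:
        fake_xattrs['%s%s' % (METADATA_KEY, key or '')] = metastr[:254]
        metastr = metastr[254:]
        key += 1

def read_metadata_sketch():
    metadata = b''
    key = 0
    while ('%s%s' % (METADATA_KEY, key or '')) in fake_xattrs:
        metadata += fake_xattrs['%s%s' % (METADATA_KEY, key or '')]
        key += 1
    return pickle.loads(metadata)

write_metadata_sketch({'name': '/a/c/o', 'X-Object-Meta-Note': 'x' * 500})
print(read_metadata_sketch()['name'])  # /a/c/o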
class DiskFile(object):
"""
Manage object files on disk.
:param path: path to devices on the node
:param device: device name
:param partition: partition on the device the object lives in
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param keep_data_fp: if True, don't close the fp, otherwise close it
:param disk_chunk_size: size of chunks on file reads
"""
def __init__(self, path, device, partition, account, container, obj,
logger, keep_data_fp=False, disk_chunk_size=65536):
self.disk_chunk_size = disk_chunk_size
self.name = '/' + '/'.join((account, container, obj))
name_hash = hash_path(account, container, obj)
self.datadir = os.path.join(path, device,
storage_directory(DATADIR, partition, name_hash))
self.device_path = os.path.join(path, device)
self.tmpdir = os.path.join(path, device, 'tmp')
self.logger = logger
self.metadata = {}
self.meta_file = None
self.data_file = None
self.fp = None
self.iter_etag = None
self.started_at_0 = False
self.read_to_eof = False
self.quarantined_dir = None
self.keep_cache = False
if not os.path.exists(self.datadir):
return
files = sorted(os.listdir(self.datadir), reverse=True)
for file in files:
if file.endswith('.ts'):
self.data_file = self.meta_file = None
self.metadata = {'deleted': True}
return
if file.endswith('.meta') and not self.meta_file:
self.meta_file = os.path.join(self.datadir, file)
if file.endswith('.data') and not self.data_file:
self.data_file = os.path.join(self.datadir, file)
break
if not self.data_file:
return
self.fp = open(self.data_file, 'rb')
self.metadata = read_metadata(self.fp)
if not keep_data_fp:
self.close(verify_file=False)
if self.meta_file:
with open(self.meta_file) as mfp:
for key in self.metadata.keys():
if key.lower() not in DISALLOWED_HEADERS:
del self.metadata[key]
self.metadata.update(read_metadata(mfp))
def __iter__(self):
"""Returns an iterator over the data file."""
try:
dropped_cache = 0
read = 0
self.started_at_0 = False
self.read_to_eof = False
if self.fp.tell() == 0:
self.started_at_0 = True
self.iter_etag = md5()
while True:
chunk = self.fp.read(self.disk_chunk_size)
if chunk:
if self.iter_etag:
self.iter_etag.update(chunk)
read += len(chunk)
if read - dropped_cache > (1024 * 1024):
self.drop_cache(self.fp.fileno(), dropped_cache,
read - dropped_cache)
dropped_cache = read
yield chunk
else:
self.read_to_eof = True
self.drop_cache(self.fp.fileno(), dropped_cache,
read - dropped_cache)
break
finally:
self.close()
def app_iter_range(self, start, stop):
"""Returns an iterator
|
over the data file for range (start, stop)"""
if start:
self.fp.seek(start)
if stop is not None:
length = stop - start
else:
length = None
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
def _handle_close_quarantine(self):
"""Check if file needs to be quarantined"""
try:
obj_size = self.get_data_file_size()
except DiskFileError, e:
self.quarantine()
return
except DiskFileNotExist:
return
if (self.iter_etag and self.started_at_0 and self.read_to_eof and
'ETag' in self.metadata and
self.iter_etag.hexdigest() != self.metadata.get('ETag')):
self.quarantine()
def close(self, verify_file=True):
"""
Close the file. Will handle quarantining file if necessary.
:param verify_file: Defaults to True. If false, will not check
file to see if it needs quarantining.
"""
if self.fp:
try:
if verify_file:
self._handle_close_qua
|
rendon/omegaup
|
stuff/libkarel.py
|
Python
|
bsd-3-clause
| 5,548
| 0.026073
|
# -*- coding: utf-8 -*-
"""Librería para parsear entradas y salidas de Karel en XML."""
import xml.etree.ElementTree as ET
import sys
def load():
"""Regresa (input, output, nombre de caso) para la ejecución actual"""
with open('data.in', 'r') as data_in:
return KarelInput(data_in.read()), KarelOutput(sys.stdin.read()), sys.argv[1]
class KarelInput:
"""Representa un archivo .in. Los siguientes miembros están definidos:
* w: el ancho del mundo
* h: el alto del mundo
* x: la posición x inicial de Karel
* y: la posición y inicial de Karel
* direccion: La orientación inicial de Karel. Puede ser uno de ['NORTE', 'ESTE', 'SUR', 'OESTE']
* mochila: El número de zumbadores en la mochila de Karel. Puede ser un entero o la cadena 'INFINITO'
* despliega: Lista de elementos que se van a guardar en la salida. Puede ser uno de ['MUNDO', 'ORIENTACION', 'POSICION']
* despliega_posicion: True si se va a desplegar la posición final de Karel en la salida
* despliega_orientacion: True si se va a desplegar la orientación final de Karel en la salida
* despliega_mundo: True si se van a desplegar los zumbadores finales elegidor en la salida
* _lista_dump: La lista original de posiciones (x, y) de casillas que se van a desplegar en la salida
* _dump: Un diccionario donde cada llave (x, y) que esté definida significa que se va a desplegar la casilla
* _lista_zumbadores: La lista original de montones (x, y, zumbadores) en el mundo
* _zumbadores: Un diccionario donde cada llave (x, y) tiene como valor el número de zumbadores en esa casilla"""
def __init__(self, string):
self.root = ET.fromstring(string)
mundo = self.root.find('mundos/mundo').attrib
self.w = int(mundo['ancho'])
self.h = int(mundo['alto'])
programa = self.root.find('programas/programa').attrib
self.x = int(programa['xKarel'])
self.y = int(programa['yKarel'])
self.direccion = programa['direccionKarel']
self.mochila = programa['mochilaKarel']
if self.mochila != 'INFINITO':
self.mochila = int(self.mochila)
self.despliega = map(
lambda x: x.attrib['tipo'].upper(),
self.root.findall('programas/programa/despliega')
)
self.despliega_orientacion = 'ORIENTACION' in self.despliega
self.despliega_mundo = 'MUNDO' in self.despliega
self.despliega_posicion = 'POSICION' in self.despliega
self.despliega_instrucciones = 'INSTRUCCIONES' in self.despliega
self._lista_zumbadores = map(
lambda x: {
'x': int(x.attrib['x']),
'y': int(x.attrib['y']),
'zumbadores': x.attrib['zumbadores']
},
self.root.findall('mundos/mundo/monton')
)
self._zumbadores = {(x['x'], x['y']): x['zumbadores'] for x in self._lista_zumbadores}
self._lista_dump = map(
lambda x: {k: int(x.attrib[k]) for k in x.attrib},
self.root.findall('mundos/mundo/posicionDump')
)
self._dump = {(x['x'], x['y']): True for x in self._lista_dump}
def __repr__(self):
"""Imprime una versión bonita del objeto"""
return '<libkarel.KarelInput %s>' % ', '.join(map(lambda x: '%s=%r' % x, {
'x': self.x,
'y': self.y,
'mochila': self.mochila,
'direccion': self.direccion,
'despliega': self.despliega,
}.iteritems()))
def zumbadores(self, x, y):
"""Regresa el número de zumbadores (o la cadena 'INFINITO') para la casilla en (x, y)"""
if (x, y) not in self._zumbadores:
return 0
z = self._zumbadores[(x, y)]
if z == 'INFINITO':
return z
return int(z)
def dump(self, x, y):
"""Regresa True si la casilla está marcada para generar una salida"""
return (x, y) in self._dump
class KarelOutput:
"""Representa un archivo .out. Los siguientes miembros están definidos:
* resultado: una cadena con el resultado de la ejecución. 'FIN PROGRAMA' significa ejecución exitosa.
* error: True si no fue una ejeción exitosa.
* x: la posición x final de Karel. None si no se encuentra en la salida.
* y: la po
|
sición y final de Karel. None si no se encuentra en la salida.
* direccion: La orientación inicial de Karel. Puede ser uno de ['NORTE', 'ESTE', 'SUR', 'OESTE'], o None si no se encuentra
* _zumbadores: Un diccionario donde cada llave (x, y) tiene como valor el número de zumbadores en esa casilla al final de la ejecución"""
def __init__(self, string):
self.root = ET.fromstring(string)
self._zumbadores = {}
for linea in self.root.findall('mundos/mundo/linea'):
y = int(linea.attrib['fila'])
x = 0
for token in linea.text.strip().split():
if token[0] == '(':
x = int(token[1:-1])
else:
self._zumbadores[(x, y)] = token
x += 1
self.resultado = self.root.find('programas/programa').attrib['resultadoEjecucion']
self.error = self.resultado != 'FIN PROGRAMA'
karel = self.root.find('programas/programa/karel')
self.x = None
self.y = None
self.direccion = None
if karel:
if 'x' in karel.attrib:
self.x = int(karel.attrib['x'])
self.y = int(karel.attrib['y'])
if 'direccion' in karel.attrib:
self.direccion = karel.attrib['direccion']
def __repr__(self):
"""Imprime una versión bonita del objeto"""
return '<libkarel.KarelOutput %s>' % ', '.join(map(lambda x: '%s=%r' % x, {
'x': self.x,
'y': self.y,
'direccion': self.direccion,
'resultado': self.resultado,
'error': self.error,
}.iteritems()))
def zumbadores(self, x, y):
"""Regresa el número de zumbadores (o la cadena 'INFINITO') para la casilla en (x, y)"""
if (x, y) not in self._zumbadores:
return 0
z = self._zumbadores[(x, y)]
if z == 'INFINITO':
return z
return int(z)
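A hedged illustration of the compressed linea format parsed by KarelOutput above, with a hand-written XML snippet (the <resultados> wrapper element is invented for the sketch; only the paths the class actually reads matter): a (n) token jumps the column to n, any other token is a beeper count stored at the current column, and the column then advances by one.

import xml.etree.ElementTree as ET

sample = """<resultados>
  <mundos>
    <mundo>
      <linea fila="3"> (2) 5 INFINITO (7) 1 </linea>
    </mundo>
  </mundos>
</resultados>"""

zumbadores = {}
root = ET.fromstring(sample)
for linea in root.findall('mundos/mundo/linea'):
    y = int(linea.attrib['fila'])
    x = 0
    for token in linea.text.strip().split():
        if token[0] == '(':
            x = int(token[1:-1])     # jump to column n
        else:
            zumbadores[(x, y)] = token
            x += 1

print(zumbadores)  # {(2, 3): '5', (3, 3): 'INFINITO', (7, 3): '1'}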
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractForthemoneyTranslations.py
|
Python
|
bsd-3-clause
| 249
| 0.028112
|
def extractForthemoneyTranslations(item):
"""
Forthemoney Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
|
Konubinix/qutebrowser
|
tests/unit/javascript/position_caret/test_position_caret.py
|
Python
|
gpl-3.0
| 3,322
| 0
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for position_caret.js."""
import pytest
from PyQt5.QtCore import Qt
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import QWebPage
@pytest.yield_fixture(autouse=True)
def enable_caret_browsing(qapp):
"""Fixture to enable caret browsing globally."""
settings = QWebSettings.globalSettings()
old_value = settings.testAttribute(QWebSettings.CaretBrowsingEnabled)
settings.setAttribute(QWebSettings.CaretBrowsingEnabled, True)
yield
settings.setAttribute(QWebSettings.CaretBrowsingEnabled, old_value)
class CaretTester:
"""Helper class (for the caret_tester fixture) for asserts.
Attributes:
js: The js_tester fixture.
"""
def __init__(self, js_tester):
self.js = js_tester
def check(self):
"""Check whether the caret is before the MARKER text."""
self.js.run_file('position_caret.js')
self.js.webview.triggerPageAction(QWebPage.SelectNextWord)
assert self.js.webview.selectedText().rstrip() == "MARKER"
def check_scrolled(self):
"""Check if the page is scrolled down."""
frame = self.js.webview.page().mainFrame()
minimum = frame.scrollBarMinimum(Qt.Vertical)
value = frame.scrollBarValue(Qt.Vertical)
assert value > minimum
@pytest.fixture
def caret_tester(js_tester):
"""Helper fixture to test caret browsing positions."""
return CaretTester(js_tester)
@pytest.mark.integration
def test_simple(caret_tester):
"""Test with a simple (one-line) HTML text."""
caret_tester.js.load('position_caret/simple.html')
caret_tester.check()
@pytest.mark.integration
def test_scrolled_down(caret_tester):
"""Test with multiple text blocks with the viewport scrolled down."""
caret_tester.js.load('position_caret/scrolled_down.html')
caret_tester.js.scroll_anchor('anchor')
caret_tester.check_scrolled()
caret_tester.check()
@pytest.mark.integration
@pytest.mark.parametrize('style', ['visibility: hidden', 'display: none'])
def test_invisible(caret_tester, style):
"""Test with hidden text elements."""
caret_tester.js.load('position_caret/invisible.html', style=style)
caret_tester.check()
@pytest.mark.integration
def test_scrolled_down_img(caret_tester):
"""Test with an image at the top with the viewport scrolled down."""
caret_tester.js.load('position_caret/scrolled_down_img.html')
caret_tester.js.scroll_anchor('anchor')
caret_tester.check_scrolled()
caret_tester.check()
|
igor-shevchenko/rutermextract
|
example.py
|
Python
|
mit
| 274
| 0.004386
|
# coding=utf-8
|
from rutermextract import TermExtractor
term_extractor = TermExtractor()
text = u'Съешь ещё этих мягких французских булок да выпей же чаю.'
for term in term_extractor(text):
print term.normalized, term.count
|
zac11/AutomateThingsWithPython
|
Lists/presence_of_element.py
|
Python
|
mit
| 241
| 0.024896
|
listofcricket=['kohli','mathews','morgan','southee','tamim','smith']
print('Enter a name to search')
name = input()
if name not in listofcricket:
print(name+' is not in the list of cricketers')
else:
print(name+' is in this list')
|
SalesforceFoundation/mrbelvedereci
|
metaci/testresults/migrations/0013_install_summary_job.py
|
Python
|
bsd-3-clause
| 1,046
| 0
|
# Generated by Django 2.1.7 on 2019-04-26 17:55
from django.db import migrations, models
from scheduler.models import RepeatableJob
from django.utils import timezone
from django_rq import job
@job("short")
def install_generate_summaries_job(apps, schema_editor):
job, created = RepeatableJob.objects.get_or_create(
callable="metaci.testresults.tasks.generate_summaries",
enabled=True,
name="generate_summaries_job",
queue="short",
defaults={
"interval": 30,
"interval_unit": "minutes",
"scheduled_time": timezone.now(),
},
)
def uninstall_generate_summaries_job(apps, schema_editor):
RepeatableJob.objects.filter(name="generate_summaries_job").delete()
class Migration(migrations.Migration):
atomic = False
dependencies = [("testresults", "0012_create_summaries")]
operations = [
migrations.RunPython(
install_generate_summaries_job,
reverse_code=uninstall_generate_summaries_job,
)
]
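For context on the migration above: with get_or_create, the values in defaults are applied only when a new row is created, while the lookup fields identify an existing one. A plain-Python stand-in (not Django itself) showing that behaviour:

registry = {}

def get_or_create(name, defaults):
    # Defaults are used only when the object does not exist yet.
    if name in registry:
        return registry[name], False
    registry[name] = dict(defaults, name=name)
    return registry[name], True

job, created = get_or_create("generate_summaries_job", {"interval": 30})
print(created, job["interval"])   # True 30
job, created = get_or_create("generate_summaries_job", {"interval": 60})
print(created, job["interval"])   # False 30 (existing row wins, defaults ignored)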
|
qiita-spots/qp-qiime2
|
qp_qiime2/util.py
|
Python
|
bsd-3-clause
| 1,804
| 0
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
def get_qiime2_type_name_and_predicate(element):
"""helper method to get the qiime2 type name and predicate
Parameters
----------
element : qiime2.core.type.signature
The signature to parse
Returns
-------
str, dict
The name and the predicate of the input signature
"""
to_ast = element.qiime_type.to_ast()
if to_ast['type'] == 'union':
# union types allow offering choices alongside another type of parameter;
# for example, a range(1, 10) that also offers the choice `ignore`. These are
# not necessary in Qiita as they are simply ignored if unchanged. Thus, we
# loop over the members of the union and ignore `Choices`.
to_ast = [x for x in to_ast['members']
if x['predicate']['name'] != 'Choices'][0]
predicate = to_ast['predicate']
elif to_ast['name'] == 'FeatureData':
predicate = []
for f in to_ast['fields']:
if 'members' in f:
for fm in f['members']:
predicate.append(fm['name'])
elif 'mapping' in f:
for fm in f['mapping']:
for fme in fm:
|
predicate.append(fme['name'])
else:
predicate.append(f['name'])
predicate = sorted(list(set(predicate)))
else:
predicate = element.qiime_type.predicate
name = to_ast['name']
return name, predicate
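A sketch of what the helper above returns for a union type, driving get_qiime2_type_name_and_predicate with a hand-built stand-in for the qiime2 signature object; the AST dict only mimics the keys the function reads, and the concrete values are invented for illustration:

from types import SimpleNamespace

# Fake signature whose qiime_type.to_ast() mimics a union of
# Int % Range(1, 10) with a Str % Choices('ignore') alternative.
union_ast = {
    'type': 'union',
    'members': [
        {'type': 'expression', 'name': 'Int',
         'predicate': {'name': 'Range', 'range': [1, 10]}},
        {'type': 'expression', 'name': 'Str',
         'predicate': {'name': 'Choices', 'choices': ['ignore']}},
    ],
}
element = SimpleNamespace(
    qiime_type=SimpleNamespace(to_ast=lambda: union_ast, predicate=None))

name, predicate = get_qiime2_type_name_and_predicate(element)
print(name)       # Int
print(predicate)  # {'name': 'Range', 'range': [1, 10]}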
|
andre-geldenhuis/bloggregator
|
blogaggregator/tests/test_models.py
|
Python
|
gpl-2.0
| 1,687
| 0.001186
|
# -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from blogaggregator.user.models import User, Role
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
def test_get_by_id(self):
user = User('foo', 'foo@bar.com')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
user = User(username='foo', email='foo@bar.com')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
user = User(username='foo', email='foo@bar.com')
user.save()
assert user.password is None
def test_factory(self, db):
user = UserFactory(password="myprecious")
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
user = User.create(username="foo", email="foo@bar.com",
password="foobarbaz123")
assert user.check_password('foobarbaz123') is True
assert user.check_password("barfoobaz") is False
def test_full_name(self):
user = UserFactory(first_name="Foo", last_name="Bar")
assert user.full_name == "Foo Bar"
def test_roles(self):
role = Role(name='admin')
role.save()
u = UserFactory()
u.roles.append(role)
u.save()
assert role in u.roles
|
nricklin/leafpy
|
tests/unit/test_login.py
|
Python
|
mit
| 2,246
| 0.008459
|
import unittest
from leafpy import Leaf
from leafpy.auth import login
import vcr
USERNAME = 'dummyuser'
PASSWORD = 'dummypass'
class LoginTests(unittest.TestCase):
@vcr.use_cassette('tests/unit/cassettes/test_login.yaml',
filter_post_data_parameters=['UserId','Password'])
def test_login(self):
leaf = Leaf(USERNAME, PASSWORD)
assert leaf.VIN == "vin123"
assert leaf.custom_sessionid == "csessid"
def test_login_with_custom_sessionid_and_vin(self):
leaf = Leaf(VIN='vin345', custom_sessionid='csid123')
assert leaf.VIN == 'vin345'
assert leaf.custom_sessionid == 'csid123'
@vcr.use_cassette('tests/unit/cassettes/test_exeption_raised_when_bad_credentials_passed.yaml',
filter_post_data_parameters=['UserId','Password'])
def test_exeption_raised_when_bad_credentials_passed(self):
with self.assertRaises(Exception) as w:
leaf = Leaf('bad_email@domain.com','invalidpassword')
@vcr.use_cassette('tests/unit/cassettes/test_exception_raised_when_bad_vin_and_customsessionid_used.yaml',
filter_post_data_parameters=['UserId','Password'])
def test_exception_raised_when_bad_vin_and_customsessionid_used(self):
leaf = Leaf(VIN='vin345',custom_sessionid='csid123')
with self.assertRaises(Exception) as w:
leaf.BatteryStatusRecordsRequest()
def test_login_with_only_username_raises_exception(self):
|
with self.assertRaises(Exception):
leaf = Leaf('username')
def test_login_with_only_VIN_raises_exception(self):
with self.assertRaises(Exception):
leaf = Leaf(VIN='vin123')
def test_login_with_only_custom_sessionid_raises_exception(self):
with self.assertRaises(Exception):
leaf = Leaf(custom_sessionid='vin123')
def test_login_with_no_args_raises_exception(self):
with self.assertRaises(Exception):
leaf = Leaf()
@vcr.use_cassette('tests/unit/cassettes/test_login_standalone.yaml',
filter_post_data_parameters=['UserId','Password'])
def test_login_standalone(self):
csid, VIN = login(USERNAME,PASSWORD)
assert csid == 'csessid'
assert VIN == 'vin123'
|
mightypenguin/qotd
|
slackbot.py
|
Python
|
mit
| 4,015
| 0.003736
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from collections import namedtuple
import json
import logging
import time
from collections import namedtuple
from slackclient import SlackClient
class SlackBot(object):
def __init__(self, settingsFilePath='settings.json'):
self.s = {}
self.sclient = {}
### Not necessary, moved to settings?
# self.botcheck = '' #<@' + s['bot']['id'] + '>: '
with file(settingsFilePath, 'r') as settingsfile:
self.s = json.load(settingsfile)
logging.debug(self.s)
self.lastping = int(time.time())
self.sclient = SlackClient(self.s["token"])
self.CommandBody = namedtuple('CommandBody', 'action help')
self.botcheck = '' # <@' + s['bot']['id'] + '>: '
self.commands = {}
#"""{ u'channel': u'G1FS1CJ84',
# u'team': u'T05311JTT',
# u'text': u'<@U1FRJ3WMU>: lol',
# u'ts': u'1465583194.000034',
# u'type': u'message',
# u'user': u'U0LJ6Q4S0'}""" ### Typical structure of a command packet
def help(self, msg):
output = self.sclient.api_call('chat.postMessage',
|
as_user='true',
channel=msg['channel'],
text=self.commands["help"].help)
logging.debug(output)
def generateHelp(self):
helptext = 'Commands are:\n'
for c in self.commands:
helptext += "\t" + self.botcheck + self.commands[c].help + "\n"
helptext += "\t" + self.botcheck + "help [this help text]\n"
self.commands['help'] = self.CommandBody(action=self.help, help=helptext)
def get_bot_id(self):
api_call = self.sclient.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == self.s["bot"]["name"]:
self.s["bot"]["id"] = user.get('id')
self.botcheck = '<@' + self.s['bot']['id'] + '>: '
return ({user['name']: user.get('id')})
else:
return "could not find bot user with the name " + s["bot"]["name"]
def autoping(self,last):
### hardcode the interval to 3 seconds
now = int(time.time())
if last + 3 < now:
self.sclient.server.ping()
return now
def addCommand(self, command, action, help):
self.commands[command] = self.CommandBody(action=action, help=help)
def sendReply(self, msg):
text = msg['text'][len(self.botcheck):]
pos = len(text)
try:
pos = text.index(' ')
except ValueError:
pass
cmd = text[:pos] # grab command string up to first space
logging.info('cmd ="' + cmd + '"')
if cmd in self.commands:
self.commands[cmd].action(msg)
def monitor(self):
#self.sclient = SlackClient(s["token"])
logging.info("Connecting as " + self.s["bot"]["name"])
if self.sclient.rtm_connect():
logging.info("...Connected!")
logging.debug(self.get_bot_id())
self.generateHelp()
last_ping = int(time.time())
while True:
messages = self.sclient.rtm_read()
# logging.debug(messages)
last_ping = self.autoping(last_ping)
for message in messages:
if all(k in message for k in ('type', 'text')) \
and message['type'] == 'message' \
and 'bot_id' not in message \
and self.botcheck in message['text']:
logging.debug(message)
self.sendReply(message)
time.sleep(1)
else:
logging.info("Connection Failed, invalid token?")
|
enigmampc/catalyst
|
catalyst/finance/performance/position_tracker.py
|
Python
|
apache-2.0
| 13,092
| 0.000076
|
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import logbook
import numpy as np
from collections import namedtuple
from math import isnan
from six import iteritems, itervalues
from catalyst.finance.performance.position import Position
from catalyst.finance.transaction import Transaction
from catalyst.utils.input_validation import expect_types
import catalyst.protocol as zp
from catalyst.assets import (
Future,
Asset
)
from . position import positiondict
from catalyst.constants import LOG_LEVEL
log = logbook.Logger('Performance', level=LOG_LEVEL)
PositionStats = namedtuple('PositionStats',
['net_exposure',
'gross_value',
'gross_exposure',
'short_value',
'short_exposure',
'shorts_count',
'long_value',
'long_exposure',
'longs_count',
'net_value'])
def calc_position_values(positions):
values = []
for position in positions:
if isinstance(position.asset, Future):
# Futures don't have an inherent position value.
values.append(0.0)
else:
values.append(position.last_sale_price * position.amount)
return values
def calc_net(values):
# Returns 0.0 if there are no values.
return sum(values, np.float64())
def calc_position_exposures(positions):
exposures = []
for position in positions:
exposure = position.amount * position.last_sale_price
if isinstance(position.asset, Future):
exposure *= position.asset.multiplier
exposures.append(exposure)
return exposures
def calc_long_value(position_values):
return sum(i for i in position_values if i > 0)
def calc_short_value(position_values):
return sum(i for i in position_values if i < 0)
def calc_long_exposure(position_exposures):
return sum(i for i in position_exposures if i > 0)
def calc_short_exposure(position_exposures):
return sum(i for i in position_exposures if i < 0)
def calc_longs_count(position_exposures):
return sum(1 for i in position_exposures if i > 0)
def calc_shorts_count(position_exposures):
return sum(1 for i in position_exposures if i < 0)
def calc_gross_exposure(long_exposure, short_exposure):
return long_exposure + abs(short_exposure)
def calc_gross_value(long_value, short_value):
return long_value + abs(short_value)
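A worked example of the helpers above, mirroring their arithmetic on a toy book (long 100 shares at 10.0 and short 2 futures at 50.0 with a contract multiplier of 5) rather than importing catalyst:

# Values (calc_position_values): futures contribute 0.0, equities price * amount.
values = [10.0 * 100, 0.0]
# Exposures (calc_position_exposures): amount * price, times multiplier for futures.
exposures = [10.0 * 100, -2 * 50.0 * 5]

long_value = sum(v for v in values if v > 0)          # 1000.0
short_value = sum(v for v in values if v < 0)         # 0.0
long_exposure = sum(e for e in exposures if e > 0)    # 1000.0
short_exposure = sum(e for e in exposures if e < 0)   # -500.0
gross_exposure = long_exposure + abs(short_exposure)  # 1500.0
net_exposure = sum(exposures)                         # 500.0
print(gross_exposure, net_exposure)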
class PositionTracker(object):
def __init__(self, data_frequency):
# asset => position object
self.positions = positiondict()
self._unpaid_dividends = {}
self._unpaid_stock_dividends = {}
self._positions_store = zp.Positions()
self.data_frequency = data_frequency
@expect_types(asset=Asset)
def update_position(self, asset, amount=None, last_sale_price=None,
last_sale_date=None, cost_basis=None):
if asset not in self.positions:
position = Position(asset)
self.positions[asset] = position
else:
position = self.positions[asset]
if amount is not None:
position.amount = amount
if last_sale_price is not None:
position.last_sale_price = last_sale_price
if last_sale_date is not None:
position.last_sale_date = last_sale_date
if cost_basis is not None:
position.cost_basis = cost_basis
def execute_transaction(self, txn):
# Update Position
# ----------------
asset = txn.asset
if asset not in self.positions:
position = Position(asset)
self.positions[asset] = position
else:
position = self.positions[asset]
position.update(txn)
if position.amount == 0:
del self.positions[asset]
try:
# if this position exists in our user-facing dictionary,
# remove it as well.
del self._positions_store[asset]
except KeyError:
pass
@expect_types(asset=Asset)
def handle_commission(self, asset, cost):
# Adjust the cost basis of the stock if we own it
if asset in self.positions:
self.positions[asset].adjust_commission_cost_basis(asset, cost)
def handle_splits(self, splits):
"""
Processes a list of splits by modifying any positions as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (asset, ratio).
Returns
-------
int: The leftover cash from fractional shares after modifying each
position.
"""
total_leftover_cash = 0
for asset, ratio in splits:
if asset in self.positions:
# Make the position object handle the split. It returns the
# leftover cash from a fractional share, if there is any.
position = self.positions[asset]
leftover_cash = position.handle_split(asset, ratio)
total_leftover_cash += leftover_cash
return total_leftover_cash
def earn_dividends(self, dividends, stock_dividends):
"""
Given a list of dividends whose ex_dates are all the next trading day,
calculate and store the cash and/or stock payments to be paid on each
dividend's pay date.
Parameters
----------
dividends: iterable of (asset, amount, pay_date) namedtuples
stock_dividends: iterable of (asset, payment_asset, ratio, pay_date)
namedtuples.
"""
for dividend in dividends:
# Store the earned dividends so that they can be paid on the
# dividends' pay_dates.
div_owed = self.positions[dividend.asset].earn_dividend(dividend)
try:
self._unpaid_dividends[dividend.pay_date].append(div_owed)
except KeyError:
self._unpaid_dividends[dividend.pay_date] = [div_owed]
for stock_dividend in stock_dividends:
div_owed = \
self.positions[stock_dividend.asset].earn_stock_dividend(
stock_dividend)
try:
self._unpaid_stock_dividends[stock_dividend.pay_date].\
append(div_owed)
except KeyError:
self._unpaid_stock_dividends[stock_dividend.pay_date] = \
[div_owed]
def pay_dividends(self, next_trading_day):
"""
Returns a cash payment based on the dividends that should be paid out
according to the accumulated bookkeeping of earned, unpaid, and stock
dividends.
"""
net_cash_payment = 0.0
try:
payments = self._unpaid_dividends[next_trading_day]
# Mark these dividends as paid by dropping them from our unpaid
del self._unpaid_dividends[next_trading_day]
except KeyError:
payments = []
# representing the fact that we're required to reimburse the owner of
# the stock for any dividends paid while borrowing.
for payment in payments:
net_cash_payment += payment['amount']
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
try:
stock_payments = self._unpaid_s
|
pytorch/vision
|
torchvision/ops/boxes.py
|
Python
|
bsd-3-clause
| 12,872
| 0.002331
|
from typing import Tuple
import torch
import torchvision
from torch import Tensor
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._box_convert import _box_cxcywh_to_xyxy, _box_xyxy_to_cxcywh, _box_xywh_to_xyxy, _box_xyxy_to_xywh
def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
"""
Performs non-maximum suppression (NMS) on the boxes according
to their intersection-over-union (IoU).
NMS iteratively removes lower scoring boxes which have an
IoU greater than iou_threshold with another (higher scoring)
box.
If multiple boxes have the exact same score and satisfy the IoU
criterion with respect to a reference box, the selected box is
not guaranteed to be the same between CPU and GPU. This is similar
to the behavior of argsort in PyTorch when repeated values are present.
Args:
boxes (Tensor[N, 4]): boxes to perform NMS on. They
are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and
``0 <= y1 < y2``.
scores (Tensor[N]): scores for each one of the boxes
iou_threshold (float): discards all overlapping boxes with IoU > iou_threshold
Returns:
Tensor: int64 tensor with the indices of the elements that have been kept
by NMS, sorted in decreasing order of scores
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(nms)
_assert_has_ops()
return torch.ops.torchvision.nms(boxes, scores, iou_threshold)
def batched_nms(
boxes: Tensor,
scores: Tensor,
idxs: Tensor,
iou_threshold: float,
) -> Tensor:
"""
Performs non-maximum suppression in a batched fashion.
Each index value correspond to a category, and NMS
will not be applied between elements of different categories.
Args:
boxes (Tensor[N, 4]): boxes where NMS will be performed. They
are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and
``0 <= y1 < y2``.
scores (Tensor[N]): scores for each one of the boxes
idxs (Tensor[N]): indices of the categories for each one of the boxes.
iou_threshold (float): discards all overlapping boxes with IoU > iou_threshold
Returns:
Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted
in decreasing order of scores
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(batched_nms)
# Benchmarks that drove the following thresholds are at
# https://github.com/pytorch/vision/issues/1311#issuecomment-781329339
if boxes.numel() > (4000 if boxes.device.type == "cpu" else 20000) and not torchvision._is_tracing():
return _batched_nms_vanilla(boxes, scores, idxs, iou_threshold)
else:
return _batched_nms_coordinate_trick(boxes, scores, idxs, iou_threshold)
@torch.jit._script_if_tracing
def _batched_nms_coordinate_trick(
boxes: Tensor,
scores: Tensor,
idxs: Tensor,
iou_threshold: float,
) -> Tensor:
# strategy: in order to perform NMS independently per class,
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=boxes.device)
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
boxes_for_nms = boxes + offsets[:, None]
keep = nms(boxes_for_nms, scores, iou_threshold)
return keep
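A small numeric illustration of the coordinate trick above: each box is shifted by class_id * (max_coordinate + 1), so boxes of the same class keep their geometry while boxes of different classes can no longer overlap, letting a single nms call serve every class:

import torch

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.]])
idxs = torch.tensor([0, 1])   # two different classes
max_coordinate = boxes.max()  # 11.0
offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
boxes_for_nms = boxes + offsets[:, None]
print(boxes_for_nms)
# tensor([[ 0.,  0., 10., 10.],
#         [13., 13., 23., 23.]])  the shifted class-1 box no longer overlaps class 0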
@torch.jit._script_if_tracing
def _batched_nms_vanilla(
boxes: Tensor,
scores: Tensor,
idxs: Tensor,
iou_threshold: float,
) -> Tensor:
# Based on Detectron2 implementation, just manually call nms() on each class independently
keep_mask = torch.zeros_like(scores, dtype=torch.bool)
for class_id in torch.unique(idxs):
curr_indices = torch.where(idxs == class_id)[0]
curr_keep_indices = nms(boxes[curr_indices], scores[curr_indices], iou_threshold)
keep_mask[curr_indices[curr_keep_indices]] = True
keep_indices = torch.where(keep_mask)[0]
return keep_indices[scores[keep_indices].sort(descending=True)[1]]
def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
"""
Remove boxes which contains at least one side smaller than min_size.
Args:
boxes (Tensor[N, 4]): boxes in ``(x1, y1, x2, y2)`` format
with ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
min_size (float): minimum size
Returns:
Tensor[K]: indices of the boxes that have both sides
larger than min_size
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(remove_small_boxes)
ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
keep = (ws >= min_size) & (hs >= min_size)
keep = torch.where(keep)[0]
return keep
def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
"""
Clip boxes so that they lie inside an image of size `size`.
Args:
boxes (Tensor[N, 4]): boxes in ``(x1, y1, x2, y2)`` format
with ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
size (Tuple[height, width]): size of the image
Returns:
Tensor[N, 4]: clipped boxes
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(clip_boxes_to_image)
dim = boxes.dim()
boxes_x = boxes[..., 0::2]
boxes_y = boxes[..., 1::2]
height, width = size
if torchvision._is_tracing():
boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))
boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device))
boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))
boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device))
else:
boxes_x = boxes_x.clamp(min=0, max=width)
boxes_y = boxes_y.clamp(min=0, max=height)
clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim)
return clipped_boxes.reshape(boxes.shape)
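A quick usage sketch of clip_boxes_to_image, assuming an environment where torchvision's ops import cleanly: a box hanging off a 10x10 image is clamped into [0, width] and [0, height]:

import torch
from torchvision.ops import clip_boxes_to_image

boxes = torch.tensor([[-5., 2., 15., 12.]])
print(clip_boxes_to_image(boxes, (10, 10)))  # tensor([[ 0.,  2., 10., 10.]])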
def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor:
"""
Converts boxes from given in_fmt to out_fmt.
Supported in_fmt and out_fmt are:
'xyxy': boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right.
This is the format that torchvision utilities expect.
'xywh' : boxes are represented via corner, width and height, x1, y1 being top left, w, h being width and height.
'cxcywh' : boxes are represented via centre, width and height, cx, cy being center of box, w, h
being width and height.
Args:
boxes (Tensor[N, 4]): boxes which will be converted.
in_fmt (str): Input format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh'].
out_fmt (str): Output format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh']
Returns:
Tensor[N, 4]: Boxes into converted format.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(box_convert)
allowed_fmts = ("xyxy", "xywh", "cxcywh")
if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts:
raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt")
if in_fmt == out_fmt:
return boxes.clone()
if in_fmt != "xyxy" and out_fmt != "xyxy":
# convert to xyxy and change in_fmt xyxy
if in_fmt == "xywh":
boxes = _box_xywh_to_xyxy(boxes)
elif in_fmt == "cxcywh":
boxes = _box_cxcywh_to_xyxy(boxes)
in_fmt = "xyxy"
if in_fmt == "xyxy":
if out_fmt == "xywh":
boxes = _box_xyxy_to_xywh(boxes)
elif out_fmt == "cxcywh":
boxes = _box_xyxy_to_cxcywh(boxes)
elif out_fmt == "xyxy":
if in_fmt == "xywh":
boxes = _box_xywh_to_xyxy(
|
SFII/cufcq-new
|
models/campus.py
|
Python
|
mit
| 1,334
| 0.004498
|
from models.basemodel import BaseModel
class Campus(BaseModel):
CAMPUS_CODES = ['BD', 'DN', 'CS']
LONG_NAMES = {
'BD': 'University of Colorado, Boulder',
'DN': 'University of Colorado, Denver',
'CS': 'University of Colorado, Colorado Springs'
}
def requiredFields(self):
return ['campus', 'fcqs', 'courses', 'instructors', 'departments', 'colleges', 'id']
def fields(self):
return {
'campus': (self.is_in_list(self.CAMPUS_CODES), ),
'fcqs': (self.is_list, self.schema_list_check(self.is_string, )),
'grades': (self.is_list, self.schema_list_check(self.is_string, ),),
'courses': (self.is_list, self.schema_list_check(self.is_string, )),
'instructors': (self.is_list, self.schema_list_check(self.is_string, )),
'departments': (self.is_list, self.schema_list_check(self.is_string, )),
'colleges': (self.is_list, self.schema_list_check(self.is_string, )),
'id': (self.is_string, self.is_not_empty, ),
}
def default(self):
return {
'campus': '',
'fcqs': [],
'grades': [],
'courses': [],
'instructors': [],
'departments': [],
'colleges': [],
'id': '',
}
|
D4wN/brickv
|
src/build_data/windows/OpenGL/GL/NV/texture_env_combine4.py
|
Python
|
gpl-2.0
| 1,343
| 0.026806
|
'''OpenGL extension NV.texture_env_combine4
This module customises the behaviour of the
OpenGL.raw.GL.NV.texture_env_combine4 to provide a more
Python-friendly API
Overview (from the spec)
New texture environment function COMBINE4_NV allows programmable
texture combiner operations, including
ADD                    Arg0 * Arg1 + Arg2 * Arg3
ADD_SIGNED_EXT Arg0 * Arg1 + Arg2 * Arg3 - 0.5
where Arg0, Arg1, Arg2 and Arg3 are derived from
ZERO the value 0
PRIMARY_COLOR_EXT primary color of incoming fragment
TEXTURE texture color of corresponding texture unit
CONSTANT_EXT texture environment constant color
PREVIOUS_EXT result of previous texture environment; on texture unit 0, this maps to PRIMARY_COLOR_EXT
TEXTURE<n>_ARB texture color of the <n>th texture unit
In addition, the result may be scaled by 1.0, 2.0 or 4.0.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/texture_env_combine4.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.texture_env_combine4 import *
### END AUTOGENERATED SECTION
|
noironetworks/aci-integration-module
|
aim/tests/unit/test_infra_manager.py
|
Python
|
apache-2.0
| 3,823
| 0
|
# Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from aim import aim_manager
from aim.api import infra
from aim import config # noqa
from aim.db import infra_model
from aim.tests import base
class TestAimInfraManager(base.TestAimDBBase):
def setUp(self):
super(TestAimInfraManager, self).setUp()
self.mgr = aim_manager.AimManager()
self.infra_mgr = infra_model.HostLinkManager(self.ctx, self.mgr)
def test_infra_manager(self):
host, ifname, ifmac, swid, module, port, path, pod_id, from_config = (
'f5-compute-2.noiro.lab', 'opflex1', 'd4:6d:50:dc:72:5f', 101,
1, 1, 'topology/pod-2/paths-101/pathep-[eth1/1]', 2, True)
self.infra_mgr.add_hostlink(host, ifname, ifmac, swid, module, port,
path, pod_id, from_config)
hlinks_mgr = self.mgr.find(self.ctx, infra.HostLink)
self.assertEqual(1, len(hlinks_mgr))
hlink = self.infra_mgr.get_hostlink(host, ifname)
self.assertEqual(hlink.path, hlinks_mgr[0].path)
hlinks = self.infra_mgr.get_hostlinks()
self.assertEqual(1, len(hlinks))
self.assertEqual(hlinks[0].path, hlinks_mgr[0].path)
(host2, ifname2, ifmac2, swid2, module2, port2, path2,
pod_id2, from_config2) = (
'f6-compute-2.noiro.lab', 'opflex1', 'd4:6d:50:dc:72:55', 102,
2, 2, 'topology/pod-1/paths-102/pathep-[eth2/2]', 1, False)
self.infra_mgr.add_hostlink(
host2, ifname2, ifmac2, swid2, module2, port2, path2,
pod_id2, from_config2)
hlinks = self.infra_mgr.get_hostlinks_for_host(
'f5-compute-2.noiro.lab')
self.assertEqual(hlinks[0].path, hlinks_mgr[0].path)
hlinks = self.infra_mgr.get_hostlinks_for_host_switchport(
host, swid, module, port)
self.assertEqual(hlinks[0].path, hlinks_mgr[0].path)
hlinks = self.infra_mgr.get_hostlinks_for_switchport(
swid, module, port)
self.assertEqual(hlinks[0].path, hlinks_mgr[0].path)
hlinks = self.infra_mgr.get_modules_for_switch(swid)
self.assertEqual(hlinks[0], (hlinks_mgr[0].module,))
hlinks = self.infra_mgr.get_ports_for_switch_module(swid, module)
self.assertEqual(hlinks[0], (hlinks_mgr[0].port,))
hlinks = self.infra_mgr.get_switch_and_port_for_host(host)
self.assertEqual(hlinks[0], (hlinks_mgr[0].switch_id,
hlinks_mgr[0].module, hlinks_mgr[0].port,
hlinks_mgr[0].interface_name,
hlinks_mgr[0].pod_id))
# Verify overwrite
port2 = 3
self.infra_mgr.add_hostlink(
host2, ifname2, ifmac2, swid2, module2, port2, path2,
pod_id2, from_config2)
|
hlinks = self.infra_mgr.get_hostlinks_for_host(
'f6-compute-2.noiro.lab')
self.assertEqual('3', hlinks[0].port)
self.infra_mgr.delete_hostlink(host, ifname)
# Idempotent
self.infra_mgr.delete_hostlink(host, ifname)
self.infra_mgr.delete_hostlink(host2, ifname2)
self.assertEqual(0, len(self.mgr.find(self.ctx, infra.HostLink)))
|
mbernson/iscp-search-engine
|
retrouve/__init__.py
|
Python
|
gpl-3.0
| 53
| 0.018868
|
from dotenv import load_dotenv
load_dotenv('./.env')
|
lmingcsce/p4factory
|
targets/sai_p4/tests/ptf-tests/sai_thrift/sai_base_test.py
|
Python
|
apache-2.0
| 1,788
| 0.002796
|
"""
Base classes for test cases
Tests will usually inherit from one of these classes to have the controller
and/or dataplane automatically set up.
"""
import os
import logging
import unittest
import ptf
from ptf.base_tests import BaseTest
from ptf import config
import ptf.dataplane as dataplane
################################################################
#
# Thrift interface base tests
#
################################################################
import p4_sai_rpc.sai_p4_sai as p4_sai_rpc
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
class ThriftInterface(BaseTest):
def setUp(self):
BaseTest.setUp(self)
# Set up thrift client and contact server
self.transport = TSocket.TSocket('localhost', 9091)
self.transport = TTransport.TBufferedTransport(self.transport)
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.client = p4_sai_rpc.Client(self.protocol)
self.transport.open()
def tearDown(self):
if config["log_dir"] != None:
self.dataplane.stop_pcap()
BaseTest.tearDown(self)
self.transport.close()
class ThriftInterfaceDataPlane(ThriftInterface):
"""
Root class that sets up the thrift interface and dataplane
"""
def setUp(self):
ThriftInterface.setUp(self)
self.dataplane = ptf.dataplane_instance
self.dataplane.flush()
if config["log_dir"] != None:
filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
self.dataplane.start_pcap(filename)
def tearDown(self):
if config["log_dir"] != None:
self.dataplane.stop_pcap()
ThriftInterface.tearDown(self)
|
corpusmusic/bb-cluster
|
obsolete_scripts/rn_header.py
|
Python
|
gpl-3.0
| 204
| 0.004902
|
rn = ['I', 'bII', 'II', 'bIII', 'III', 'IV', 'bV', 'V', 'bVI', 'VI', 'bVII', 'VII']
header = []
for n in rn:
for next_n in rn:
header.append(n + '-' + next_n)
print(header)
print(len(header))
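The same 144-entry header (12 roman numerals by 12) can be built in one pass with itertools.product; an equivalent sketch of the nested loops above:

from itertools import product

rn = ['I', 'bII', 'II', 'bIII', 'III', 'IV', 'bV', 'V', 'bVI', 'VI', 'bVII', 'VII']
header = ['-'.join(pair) for pair in product(rn, repeat=2)]
print(len(header))   # 144
print(header[:3])    # ['I-I', 'I-bII', 'I-II']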
|
abinashk-inf/AstroBox
|
src/astroprint/about/__init__.py
|
Python
|
agpl-3.0
| 441
| 0.004535
|
from flask import Flask
from flask import render_template
app = Flask(__name__)
def info():
user = {
'nickname': 'Praful',
'key': 'A154XWA256'
}
posts = {
'company': 'Ethereal Machines',
'link': 'http://www.etherealmachines.com/',
'firmware' : 'Marlin Firmware',
'version': 'v1.01'
}
return render_template(
'index.html', user=user, title= 'Try',
|
posts=posts)
|
eliostvs/django-kb-example
|
example/settings/settings/base.py
|
Python
|
bsd-3-clause
| 7,689
| 0.00065
|
from __future__ import unicode_literals
from os.path import abspath, basename, dirname, join, normpath
from sys import path
import markdown
"""Common settings and globals."""
# PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(SITE_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
# END PATH CONFIGURATION
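A worked example of the derivation above for this project's layout (example/settings/settings/base.py), with a hypothetical absolute path: each dirname strips one component, and SITE_NAME ends up as the folder name that later feeds ROOT_URLCONF:

from os.path import abspath, basename, dirname

settings_file = '/srv/django-kb-example/example/settings/settings/base.py'  # hypothetical location

django_root = dirname(dirname(abspath(settings_file)))  # /srv/django-kb-example/example/settings
site_root = dirname(django_root)                        # /srv/django-kb-example/example
site_name = basename(site_root)                         # 'example'
print('%s.settings.urls' % site_name)                   # example.settings.urls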
# DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG CONFIGURATION
# MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# END MANAGER CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# END DATABASE CONFIGURATION
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Sao_Paulo'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# See: https://docs.djangoproject.com/en/1.6/ref/settings/#locale-paths
LOCALE_PATHS = (
join(SITE_ROOT, 'locale'),
)
# END GENERAL CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = r"%wg@#2myqx)@0p3b%(h1deucjrka2+%kqb*ze^37m0+_f-wxr)"
# END SECRET CONFIGURATION
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# END SITE CONFIGURATION
# FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
# END FIXTURE CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
# END TEMPLATE CONFIGURATION
# MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'kb.middleware.KnowledgeMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.settings.urls' % SITE_NAME
# END URL CONFIGURATION
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'taggit',
'haystack',
'crispy_forms',
'djangosecure',
'kb',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'example',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# END LOGGING CONFIGURATION
# AUTHENTICATION CONFIGURATION
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-LOGIN_REDIRECT_URL
LOGIN_REDIRECT_URL = '/'
# END AUTHENTICATION CONFIGURATION
# THIRD PARTY CONFIGURATION
# http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# See: http://south.readthedocs.org/en/latest/installation.html#configuring-your-django-installation
INSTALLED_APPS += (
# Database migration helpers:
'south',
)
SOUTH_MIGRATION_MODULES = {
'taggit': 'taggit.south_migrations',
}
KNOWLEDGE_SETTINGS = {
'DEFAULT_SEARCH_FORM_CLASS': 'example.forms.ExampleSearchForm',
}
MARKUP_FIELD_TYPES = (
('markdown', markdown.markdown),
)
# END THIRD PARTY CONFIGURATION
|
APSL/puput-demo
|
config/settings/common.py
|
Python
|
mit
| 7,805
| 0.001025
|
# -*- coding: utf-8 -*-
"""
Django settings for puput_demo project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
from puput import PUPUT_APPS
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('puput-demo')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Admin
'django.contrib.admin',
)
INSTALLED_APPS = DJANGO_APPS + PUPUT_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware'
)
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///puput-demo"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
        },
}
}
WAGTAIL_SITE_NAME = 'Demo'
|
fogelomer/cloudify-filebeat-plugin
|
setup.py
|
Python
|
apache-2.0
| 1,438
| 0
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from setuptools import setup
# Replace the place holders with values for your project
setup(
# Do not use underscores in the plugin name.
name='cloudify-filebeat-plugin',
version='0.1',
    author='Gigaspaces',
author_email='cosmo-admin@gigaspaces.com',
description='plugin for running logging interface (based on Filebeats)',
    # This must correspond to the actual packages in the plugin.
packages=['filebeat_plugin'],
package_data={'filebeat_plugin': ['resources/filebeat.yml']},
license='LICENSE',
zip_safe=False,
install_requires=[
# Necessary dependency for developing plugins, do not remove!
'cloudify-plugins-common>=3.4m5', 'distro==0.6.0'
],
test_requires=[
'cloudify-dsl-parser>=3.4m5', 'nose',
]
)
|
oblitum/YouCompleteMe
|
python/ycm/vimsupport.py
|
Python
|
gpl-3.0
| 40,365
| 0.0301
|
# Copyright (C) 2011-2012 Google Inc.
# 2016 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from future.utils import iterkeys
import vim
import os
import json
import re
from collections import defaultdict
from ycmd.utils import ( ByteOffsetToCodepointOffset, GetCurrentDirectory,
JoinLinesAsUnicode, ToBytes, ToUnicode )
from ycmd import user_options_store
BUFFER_COMMAND_MAP = { 'same-buffer' : 'edit',
'horizontal-split' : 'split',
'vertical-split' : 'vsplit',
'new-tab' : 'tabedit' }
FIXIT_OPENING_BUFFERS_MESSAGE_FORMAT = (
'The requested operation will apply changes to {0} files which are not '
'currently open. This will therefore open {0} new files in the hidden '
'buffers. The quickfix list can then be used to review the changes. No '
'files will be written to disk. Do you wish to continue?' )
potential_hint_triggers = list( map( ToBytes, [ '[', '(', ',', ':' ] ) )
def CanComplete():
"""Returns whether it's appropriate to provide any completion at the current
line and column."""
try:
line, column = LineAndColumnAfterLastNonWhitespace()
except TypeError:
return False
if ( line, column ) == CurrentLineAndColumn():
return True
return ( ToBytes( vim.current.buffer[ line ][ column - 1 ] )
in potential_hint_triggers )
def SnappedLineAndColumn():
"""Will return CurrentLineAndColumn(), except when there's solely whitespace
between caret and a potential hint trigger, where it "snaps to trigger",
returning hint trigger's line and column instead."""
try:
line, column = LineAndColumnAfterLastNonWhitespace()
except TypeError:
return CurrentLineAndColumn()
if ( ToBytes( vim.current.buffer[ line ][ column - 1 ] )
in potential_hint_triggers ):
return ( line, column )
return CurrentLineAndColumn()
def LineAndColumnAfterLastNonWhitespace():
line, column = CurrentLineAndColumn()
line_value = vim.current.line[ :column ].rstrip()
while not line_value:
line = line - 1
if line == -1:
return None
line_value = vim.current.buffer[ line ].rstrip()
return line, len( line_value )
NO_SELECTION_MADE_MSG = "No valid selection was made; aborting."
def CurrentLineAndColumn():
"""Returns the 0-based current line and 0-based current column."""
# See the comment in CurrentColumn about the calculation for the line and
# column number
line, column = vim.current.window.cursor
line -= 1
return line, column
def CurrentColumn():
"""Returns the 0-based current column. Do NOT access the CurrentColumn in
vim.current.line. It doesn't exist yet when the cursor is at the end of the
line. Only the chars before the current column exist in vim.current.line."""
# vim's columns are 1-based while vim.current.line columns are 0-based
# ... but vim.current.window.cursor (which returns a (line, column) tuple)
# columns are 0-based, while the line from that same tuple is 1-based.
# vim.buffers buffer objects OTOH have 0-based lines and columns.
# Pigs have wings and I'm a loopy purple duck. Everything makes sense now.
return vim.current.window.cursor[ 1 ]
def CurrentLineContents():
return ToUnicode( vim.current.line )
def CurrentLineContentsAndCodepointColumn():
"""Returns the line contents as a unicode string and the 0-based current
column as a codepoint offset. If the current column is outside the line,
returns the column position at the end of the line."""
line = CurrentLineContents()
byte_column = CurrentColumn()
# ByteOffsetToCodepointOffset expects 1-based offset.
column = ByteOffsetToCodepointOffset( line, byte_column + 1 ) - 1
return line, column
def TextAfterCursor():
"""Returns the text after CurrentColumn."""
return ToUnicode( vim.current.line[ CurrentColumn(): ] )
def TextBeforeCursor():
"""Returns the text before CurrentColumn."""
return ToUnicode( vim.current.line[ :CurrentColumn() ] )
# Note the difference between buffer OPTIONS and VARIABLES; the two are not
# the same.
def GetBufferOption( buffer_object, option ):
# NOTE: We used to check for the 'options' property on the buffer_object which
# is available in recent versions of Vim and would then use:
#
# buffer_object.options[ option ]
#
# to read the value, BUT this caused annoying flickering when the
# buffer_object was a hidden buffer (with option = 'ft'). This was all due to
# a Vim bug. Until this is fixed, we won't use it.
to_eval = 'getbufvar({0}, "&{1}")'.format( buffer_object.number, option )
return GetVariableValue( to_eval )
def BufferModified( buffer_object ):
return bool( int( GetBufferOption( buffer_object, 'mod' ) ) )
def GetUnsavedAndSpecifiedBufferData( including_filepath ):
"""Build part of the request containing the contents and filetypes of all
dirty buffers as well as the buffer with filepath |including_filepath|."""
buffers_data = {}
for buffer_object in vim.buffers:
buffer_filepath = GetBufferFilepath( buffer_object )
if not ( BufferModified( buffer_object ) or
buffer_filepath == including_filepath ):
continue
buffers_data[ buffer_filepath ] = {
# Add a newline to match what gets saved to disk. See #1455 for details.
'contents': JoinLinesAsUnicode( buffer_object ) + '\n',
'filetypes': FiletypesForBuffer( buffer_object )
}
return buffers_data
def GetBufferNumberForFilename( filename, open_file_if_needed = True ):
return GetIntValue( u"bufnr('{0}', {1})".format(
EscapeForVim( os.path.realpath( filename ) ),
int( open_file_if_needed ) ) )
def GetCurrentBufferFilepath():
return GetBufferFilepath( vim.current.buffer )
def BufferIsVisible( buffer_number ):
if buffer_number < 0:
return False
window_number = GetIntValue( "bufwinnr({0})".format( buffer_number ) )
return window_number != -1
def GetBufferFilepath( buffer_object ):
if buffer_object.name:
return buffer_object.name
# Buffers that have just been created by a command like :enew don't have any
  # buffer name so we use the buffer number for that.
return os.path.join( GetCurrentDirectory(), str( buffer_object.number ) )
def GetCurrentBufferNumber():
return vim.current.buffer.number
def GetBufferChangedTick( bufnr ):
return GetIntValue( 'getbufvar({0}, "changedtick")'.format( bufnr ) )
def UnplaceSignInBuffer( buffer_number, sign_id ):
if buffer_number < 0:
return
vim.command(
'try | exec "sign unplace {0} buffer={1}" | catch /E158/ | endtry'.format(
sign_id, buffer_number ) )
def PlaceSign( sign_id, line_num, buffer_num, is_error = True ):
# libclang can give us diagnostics that point "outside" the file; Vim borks
# on these.
if line_num < 1:
line_num = 1
sign_name = 'YcmError' if is_error else 'YcmWarning'
vim.command( 'sign place {0} name={1} line={2} buffer={3}'.format(
sign_id, sign_name, line_num, buffer_num ) )
def ClearYcmSyntaxMatches():
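  # Delete only the highlight matches that YCM itself added earlier; their
  # group names all start with 'Ycm', so user-defined matches are left alone.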
matches = VimExpressionToPythonType( 'getmatches()' )
for match in matches:
if match[ 'group' ].startswith( 'Ycm' ):
vim.eval( 'matchdelete({0})'.format( match[ 'id' ] ) )
def AddDi
|
cXhristian/django-wiki
|
tests/core/test_template_tags.py
|
Python
|
gpl-3.0
| 10,375
| 0.000193
|
"""
Almost all test cases cover both tag calling and template usage.
"""
from __future__ import print_function, unicode_literals
from django.conf import settings as django_settings
from django.contrib.contenttypes.models import ContentType
from django.http import HttpRequest
from django.utils.six import assertCountEqual
from wiki.conf import settings
from wiki.forms import CreateRootForm
from wiki.models import Article, ArticleForObject, ArticleRevision
from wiki.templatetags.wiki_tags import (article_for_object, login_url,
wiki_form, wiki_render)
from ..base import TemplateTestCase
if not django_settings.configured:
django_settings.configure()
# XXX article_for_object accepts context, but not using it
class ArticleForObjectTemplatetagTest(TemplateTestCase):
template = """
{% load wiki_tags %}
{% article_for_object obj as anything %}
{{ anything }}
"""
def setUp(self):
super(ArticleForObjectTemplatetagTest, self).setUp()
from wiki.templatetags import wiki_tags
wiki_tags._cache = {}
def test_obj_arg_is_not_a_django_model(self):
from wiki.templatetags import wiki_tags
with self.assertRaises(TypeError):
article_for_object({}, '')
with self.assertRaises(TypeError):
article_for_object({'request': 100500}, {})
with self.assertRaises(TypeError):
self.render({'obj': 'tiger!'})
self.assertEqual(len(wiki_tags._cache), 0)
def test_obj_is_not_in__cache_and_articleforobject_is_not_exist(self):
from wiki.templatetags.wiki_tags import _cache as cache
obj = Article.objects.create()
article_for_object({}, obj)
self.assertIn(obj, cache)
self.assertIsNone(cache[obj])
self.assertEqual(len(cache), 1)
self.render({'obj': obj})
self.assertIn(obj, cache)
self.assertIsNone(cache[obj])
self.assertEqual(len(cache), 1)
def test_obj_is_not_in__cache_and_articleforobjec_is_exist(self):
from wiki.templatetags.wiki_tags import _cache as cache
a = Article.objects.create()
content_type = ContentType.objects.get_for_model(a)
ArticleForObject.objects.create(
article=a,
content_type=content_type,
object_id=1
)
output = article_for_object({}, a)
self.assertEqual(output, a)
self.assertIn(a, cache)
self.assertEqual(cache[a], a)
self.assertEqual(len(cache), 1)
self.render({'obj': a})
self.assertIn(a, cache)
self.assertEqual(cache[a], a)
self.assertEqual(len(cache), 1)
def test_obj_in__cache_and_articleforobject_is_not_exist(self):
model = Article.objects.create()
from wiki.templatetags import wiki_tags
wiki_tags._cache = {model: 'spam'}
article_for_object({}, model)
self.assertIn(model, wiki_tags._cache)
self.assertIsNone(wiki_tags._cache[model])
self.assertEqual(len(wiki_tags._cache), 1)
self.render({'obj': model})
self.assertIn(model, wiki_tags._cache)
self.assertIsNone(wiki_tags._cache[model])
self.assertEqual(len(wiki_tags._cache), 1)
self.assertNotIn('spam', wiki_tags._cache.values())
def test_obj_in__cache_and_articleforobjec_is_exist(self):
article = Article.objects.create()
        content_type = ContentType.objects.get_for_model(article)
ArticleForObject.objects.create(
article=article,
content_type=content_type,
object_id=1
)
from wiki.templatetags import wiki_tags
wiki_tags._cache = {article: 'spam'}
output = article_for_object({}, article)
self.assertEqual(output, article)
self.assertIn(article, wiki_tags._cache)
        self.assertEqual(wiki_tags._cache[article], article)
output = self.render({'obj': article})
self.assertIn(article, wiki_tags._cache)
self.assertEqual(wiki_tags._cache[article], article)
expected = 'Article without content (1)'
self.assertIn(expected, output)
# TODO manage plugins in template
class WikiRenderTest(TemplateTestCase):
template = """
{% load wiki_tags %}
{% wiki_render article pc %}
"""
def tearDown(self):
from wiki.core.plugins import registry
registry._cache = {}
super(WikiRenderTest, self).tearDown()
keys = ['article',
'content',
'preview',
'plugins',
'STATIC_URL',
'CACHE_TIMEOUT'
]
def test_if_preview_content_is_none(self):
# monkey patch
from wiki.core.plugins import registry
registry._cache = {'ham': 'spam'}
article = Article.objects.create()
output = wiki_render({}, article)
assertCountEqual(self, self.keys, output)
self.assertEqual(output['article'], article)
self.assertIsNone(output['content'])
self.assertIs(output['preview'], False)
self.assertEqual(output['plugins'], {'ham': 'spam'})
self.assertEqual(output['STATIC_URL'], django_settings.STATIC_URL)
self.assertEqual(output['CACHE_TIMEOUT'], settings.CACHE_TIMEOUT)
# Additional check
self.render({'article': article, 'pc': None})
def test_called_with_preview_content_and_article_have_current_revision(self):
article = Article.objects.create()
ArticleRevision.objects.create(
article=article,
title="Test title",
content="Some beauty test text"
)
content = (
"""This is a normal paragraph\n"""
"""\n"""
"""Headline\n"""
"""========\n"""
)
expected_markdown = (
"""<p>This is a normal paragraph</p>\n"""
"""<h1 id="wiki-toc-headline">Headline</h1>"""
)
# monkey patch
from wiki.core.plugins import registry
registry._cache = {'spam': 'eggs'}
output = wiki_render({}, article, preview_content=content)
assertCountEqual(self, self.keys, output)
self.assertEqual(output['article'], article)
self.assertMultiLineEqual(output['content'], expected_markdown)
self.assertIs(output['preview'], True)
self.assertEqual(output['plugins'], {'spam': 'eggs'})
self.assertEqual(output['STATIC_URL'], django_settings.STATIC_URL)
self.assertEqual(output['CACHE_TIMEOUT'], settings.CACHE_TIMEOUT)
output = self.render({'article': article, 'pc': content})
self.assertIn(expected_markdown, output)
def test_called_with_preview_content_and_article_dont_have_current_revision(
self):
article = Article.objects.create()
content = (
"""This is a normal paragraph\n"""
"""\n"""
"""Headline\n"""
"""========\n"""
)
# monkey patch
from wiki.core.plugins import registry
registry._cache = {'spam': 'eggs'}
output = wiki_render({}, article, preview_content=content)
assertCountEqual(self, self.keys, output)
self.assertEqual(output['article'], article)
self.assertMultiLineEqual(output['content'], '')
self.assertIs(output['preview'], True)
self.assertEqual(output['plugins'], {'spam': 'eggs'})
self.assertEqual(output['STATIC_URL'], django_settings.STATIC_URL)
self.assertEqual(output['CACHE_TIMEOUT'], settings.CACHE_TIMEOUT)
self.render({'article': article, 'pc': content})
class WikiFormTest(TemplateTestCase):
template = """
{% load wiki_tags %}
{% wiki_form form_obj %}
"""
def test_form_obj_is_not_baseform_instance(self):
context = {'test_key': 'test_value'}
form_obj = 'ham'
with self.assertRaises(TypeError):
wiki_form(context, form_obj)
self.assertEqual(context, {'test_key': 'test_value'})
with self.assertRaises(TypeError):
|
Ebag333/Pyfa
|
eos/effects/missilethermaldmgbonusheavy.py
|
Python
|
gpl-3.0
| 369
| 0.00542
|
# missileThermalDmgBonusHeavy
#
# Used by:
# Implants named like: Zainou 'Snapshot' Heavy Missiles HM (6 of 6)
type = "passive"
def handler(fit, container, context):
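    # Boost the thermal damage of any loaded charge requiring the Heavy
    # Missiles skill by this implant's damageMultiplierBonus attribute.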
    fit.modules.filteredChargeBoost(lambda mod: mod.charge.requiresSkill("Heavy Missiles"),
                                    "thermalDamage", container.getModifiedItemAttr("damageMultiplierBonus"))
|
cedriclaunay/gaffer
|
python/GafferImageTest/ImageReaderTest.py
|
Python
|
bsd-3-clause
| 10,516
| 0.048117
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import shutil
import unittest
import IECore
import Gaffer
import GafferImage
import GafferImageTest
class ImageReaderTest( unittest.TestCase ) :
__testDir = "/tmp/imageReaderTest"
fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/checker.exr" )
offsetDataWindowFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/rgb.100x100.exr" )
negativeDataWindowFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/checkerWithNegativeDataWindow.200x150.exr" )
negativeDisplayWindowFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/negativeDisplayWindow.exr" )
circlesExrFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/circles.exr" )
circlesJpgFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/circles.jpg" )
def testInternalImageSpaceConversion( self ) :
r = IECore.EXRImageReader( self.negativeDataWindowFileName )
image = r.read()
exrDisplayWindow = image.displayWindow
exrDataWindow = image.dataWindow
n = GafferImage.ImageReader()
n["fileName"].setValue( self.negativeDataWindowFileName )
internalDisplayWindow = n["out"]["format"].getValue().getDisplayWindow()
internalDataWindow = n["out"]["dataWindow"].getValue()
expectedDataWindow = IECore.Box2i( IECore.V2i( exrDataWindow.min.x, exrDisplayWindow.max.y - exrDataWindow.max.y ), IECore.V2i( exrDataWindow.max.x, exrDisplayWindow.max.y - exrDataWindow.min.y ) )
self.assertEqual( internalDisplayWindow, exrDisplayWindow )
self.assertEqual( internalDataWindow, expectedDataWindow )
def test( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( self.fileName )
self.assertEqual( n["out"]["dataWindow"].getValue(), IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 199, 149 ) ) )
self.assertEqual( n["out"]["format"].getValue().getDisplayWindow(), IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 199, 149 ) ) )
channelNames = n["out"]["channelNames"].getValue()
self.failUnless( isinstance( channelNames, IECore.StringVectorData ) )
self.failUnless( "R" in channelNames )
self.failUnless( "G" in channelNames )
self.failUnless( "B" in channelNames )
self.failUnless( "A" in channelNames )
image = n["out"].image()
image2 = IECore.Reader.create( self.fileName ).read()
image.blindData().clear()
image2.blindData().clear()
self.assertEqual( image, image2 )
def testNegativeDisplayWindowRead( self ) :
		n = GafferImage.ImageReader()
n["fileName"].setValue( self.negativeDisplayWindowFileName )
f = n["out"]["format"].getValue()
d = n["out"]["dataWindow"].getValue()
self.assertEqual( f.getDisplayWindow(), IECore.Box2i( IECore.V2i( -5, -5 ), IECore.V2i( 20, 20 ) ) )
		self.assertEqual( d, IECore.Box2i( IECore.V2i( 2, -14 ), IECore.V2i( 35, 19 ) ) )
expectedImage = IECore.Reader.create( self.negativeDisplayWindowFileName ).read()
expectedImage.blindData().clear()
self.assertEqual( expectedImage, n["out"].image() )
def testNegativeDataWindow( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( self.negativeDataWindowFileName )
self.assertEqual( n["out"]["dataWindow"].getValue(), IECore.Box2i( IECore.V2i( -25, -30 ), IECore.V2i( 174, 119 ) ) )
self.assertEqual( n["out"]["format"].getValue().getDisplayWindow(), IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 199, 149 ) ) )
channelNames = n["out"]["channelNames"].getValue()
self.failUnless( isinstance( channelNames, IECore.StringVectorData ) )
self.failUnless( "R" in channelNames )
self.failUnless( "G" in channelNames )
self.failUnless( "B" in channelNames )
image = n["out"].image()
image2 = IECore.Reader.create( self.negativeDataWindowFileName ).read()
op = IECore.ImageDiffOp()
res = op(
imageA = image,
imageB = image2
)
self.assertFalse( res.value )
def testTileSize( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( self.fileName )
tile = n["out"].channelData( "R", IECore.V2i( 0 ) )
self.assertEqual( len( tile ), GafferImage.ImagePlug().tileSize() **2 )
def testNoCaching( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( self.fileName )
c = Gaffer.Context()
c["image:channelName"] = "R"
c["image:tileOrigin"] = IECore.V2i( 0 )
with c :
# using _copy=False is not recommended anywhere outside
# of these tests.
t1 = n["out"]["channelData"].getValue( _copy=False )
t2 = n["out"]["channelData"].getValue( _copy=False )
# we don't want the separate computations to result in the
# same value, because the ImageReader has its own cache in
# OIIO, so doing any caching on top of that would be wasteful.
self.failIf( t1.isSame( t2 ) )
def testNonexistentFile( self ) :
n = GafferImage.ImageReader()
n["out"]["channelNames"].getValue()
n["out"].channelData( "R", IECore.V2i( 0 ) )
def testNoOIIOErrorBufferOverflows( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( "thisReallyReallyReallyReallyReallyReallyReallyReallyReallyLongFilenameDoesNotExist.tif" )
for i in range( 0, 300000 ) :
with IECore.IgnoredExceptions( Exception ) :
n["out"]["dataWindow"].getValue()
def testChannelDataHashes( self ) :
# Test that two tiles within the same image have different hashes.
n = GafferImage.ImageReader()
n["fileName"].setValue( self.fileName )
h1 = n["out"].channelData( "R", IECore.V2i( 0 ) ).hash()
h2 = n["out"].channelData( "R", IECore.V2i( GafferImage.ImagePlug().tileSize() ) ).hash()
self.assertNotEqual( h1, h2 )
def testDisabledChannelDataHashes( self ) :
# Test that two tiles within the same image have the same hash when disabled.
n = GafferImage.ImageReader()
n["fileName"].setValue( self.fileName )
n["enabled"].setValue( False )
h1 = n["out"].channelData( "R", IECore.V2i( 0 ) ).hash()
h2 = n["out"].channelData( "R", IECore.V2i( GafferImage.ImagePlug().tileSize() ) ).hash()
self.assertEqual( h1, h2 )
def testOffsetDataWindowOrigin( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( self.offsetDataWindowFileName )
image = n["out"].image()
image2 = IECore.Reader.create( self.offsetDataWindowFileName ).read()
image.blindData().clear()
image2.blindData().clear()
self.assertEqual( image, image2 )
def testJpgRead( self ) :
exrReader = GafferImage
|
Sonicbids/django
|
tests/migrations/test_executor.py
|
Python
|
bsd-3-clause
| 19,946
| 0.001604
|
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.graph import MigrationGraph
from django.test import modify_settings, override_settings, TestCase
from django.apps.registry import apps as global_apps
from .test_base import MigrationTestBase
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
class ExecutorTests(MigrationTestBase):
"""
Tests the migration executor (full end-to-end running).
Bear in mind that if these are failing you should fix the other
test failures first, as they may be propagating into here.
"""
available_apps = ["migrations", "migrations2", "django.contrib.auth", "django.contrib.contenttypes"]
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_run(self):
"""
Tests running a simple set of migrations.
"""
executor = MigrationExecutor(connection)
# Let's look at the plan first and make sure it's up to scratch
plan = executor.migration_plan([("migrations", "0002_second")])
self.assertEqual(
plan,
[
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0002_second")])
        # Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0002_second"], True),
(executor.loader.graph.nodes["migrations", "0001_initial"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_run_with_squashed(self):
"""
Tests running a squashed migration from zero (should ignore what it replaces)
"""
executor = MigrationExecutor(connection)
# Check our leaf node is the squashed one
leaves = [key for key in executor.loader.graph.leaf_nodes() if key[0] == "migrations"]
self.assertEqual(leaves, [("migrations", "0001_squashed_0002")])
# Check the plan
plan = executor.migration_plan([("migrations", "0001_squashed_0002")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_squashed_0002"], False),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0001_squashed_0002")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did. Should also just use squashed.
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_squashed_0002"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations",
"migrations2": "migrations2.test_migrations_2",
})
def test_empty_plan(self):
"""
Tests that re-planning a full migration of a fully-migrated set doesn't
perform spurious unmigrations and remigrations.
There was previously a bug where the executor just always performed the
backwards plan for applied migrations - which even for the most recent
migration in an app, might include other, dependent apps, and these
were being unmigrated.
"""
# Make the initial plan, check it
executor = MigrationExecutor(connection)
plan = executor.migration_plan([
("migrations", "0002_second"),
("migrations2", "0001_initial"),
])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
(executor.loader.graph.nodes["migrations2", "0001_initial"], False),
],
)
# Fake-apply all migrations
executor.migrate([
("migrations", "0002_second"),
("migrations2", "0001_initial")
], fake=True)
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Now plan a second time and make sure it's empty
plan = executor.migration_plan([
("migrations", "0002_second"),
("migrations2", "0001_initial"),
])
self.assertEqual(plan, [])
# Erase all the fake records
executor.recorder.record_unapplied("migrations2", "0001_initial")
executor.recorder.record_unapplied("migrations", "0002_second")
executor.recorder.record_unapplied("migrations", "0001_initial")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_soft_apply(self):
"""
Tests detection of initial migrations already having been applied.
"""
state = {"faked": None}
def fake_storer(phase, migration=None, fake=None):
state["faked"] = fake
executor = MigrationExecutor(connection, progress_callback=fake_storer)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Run it normally
self.assertEqual(
executor.migration_plan([("migrations", "0001_initial")]),
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
],
)
executor.migrate([("migrations", "0001_initial")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
# We shouldn't have faked that one
self.assertEqual(state["faked"], False)
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Fake-reverse that
executor.migrate([("migrations", None)], fake=True)
# Are the tables still there?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
# Make sure that was faked
self.assertEqual(state["faked"], True)
# Finally, migrate forwards; this should fake-apply our initial migration
executor.loader.build_graph()
self.assertEqual(
executor.migration_plan([("migrations", "0001_initial")]),
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
],
)
executor.migrate([("migrations", "0001_initial")])
self.assertEqual(state["faked"], True)
# And migrate back to clean up the database
executor.loader.build_graph()
executor.migrate([("migrations", N
|
zooniverse/aggregation
|
docs/source/images/rectangle_overlap.py
|
Python
|
apache-2.0
| 713
| 0.093969
|
import cv2
import numpy as np
template = np.zeros((500,500,3),np.uint8)
template[:,:,0] = 255
template[:,:,1] = 255
template[:,:,2] = 255
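# Outline three overlapping axis-aligned rectangles in black on the white
# 500x500 canvas built above.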
x = [50,250,250,50,50]
y = [50,50,250,250,50]
cnt = np.asarray(zip(x,y))
cv2.drawContours(template,[cnt],0,0,1)
x = [100,200,200,100,100]
y = [300,300,150,150,300]
cnt = np.asarray(zip(x,y))
cv2.drawContours(template,[cnt],0,0,1)
x = [150,400,400,150,150]
y = [200,200,400,400,200]
cnt = np.asarray(zip(x,y))
cv2.drawContours(template,[cnt],0,0,1)
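# Fill the region shared by all three rectangles, (150,200) to (200,250),
# in blue (BGR) to highlight the overlap.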
x = [150,200,200,150,150]
y = [250,250,200,200,250]
cnt = np.asarray(zip(x,y))
cv2.drawContours(template,[cnt],0,(255,0,0),-1)
cv2.imwrite("/home/ggdhines/github/aggregation/docs/images/rectangle_overlap.jpg",template)
|
cloudbau/nova
|
nova/tests/api/openstack/compute/test_limits.py
|
Python
|
apache-2.0
| 35,992
| 0.00025
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests dealing with HTTP rate-limiting.
"""
import httplib
import StringIO
from xml.dom import minidom
from lxml import etree
import webob
from nova.api.openstack.compute import limits
from nova.api.openstack.compute import views
from nova.api.openstack import xmlutil
import nova.context
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
from nova import utils
TEST_LIMITS = [
limits.Limit("GET", "/delayed", "^/delayed", 1,
utils.TIME_UNITS['MINUTE']),
limits.Limit("POST", "*", ".*", 7, utils.TIME_UNITS['MINUTE']),
limits.Limit("POST", "/servers", "^/servers", 3,
utils.TIME_UNITS['MINUTE']),
limits.Limit("PUT", "*", "", 10, utils.TIME_UNITS['MINUTE']),
limits.Limit("PUT", "/servers", "^/servers", 5,
utils.TIME_UNITS['MINUTE']),
]
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
class BaseLimitTestSuite(test.NoDBTestCase):
"""Base test suite which provides relevant stubs and time abstraction."""
def setUp(self):
super(BaseLimitTestSuite, self).setUp()
self.time = 0.0
self.stubs.Set(limits.Limit, "_get_time", self._get_time)
self.absolute_limits = {}
def stub_get_project_quotas(context, project_id, usages=True):
return dict((k, dict(limit=v))
for k, v in self.absolute_limits.items())
self.stubs.Set(nova.quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
def _get_time(self):
"""Return the "time" according to this test suite."""
return self.time
class LimitsControllerTest(BaseLimitTestSuite):
"""
Tests for `limits.LimitsController` class.
"""
def setUp(self):
"""Run before each test."""
super(LimitsControllerTest, self).setUp()
self.controller = limits.create_resource()
self.ctrler = limits.LimitsController()
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
request = webob.Request.blank("/")
request.accept = accept_header
request.environ["wsgiorg.routing_args"] = (None, {
"action": "index",
"controller": "",
})
context = nova.context.RequestContext('testuser', 'testproject')
request.environ["nova.context"] = context
return request
def _populate_limits(self, request):
"""Put limit info into a request."""
_limits = [
limits.Limit("GET", "*", ".*", 10, 60).display(),
limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
limits.Limit("GET", "changes-since*", "changes-since",
5, 60).display(),
]
request.environ["nova.limits"] = _limits
return request
def test_empty_index_json(self):
# Test getting empty limit details in JSON.
request = self._get_index_request()
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [],
"absolute": {},
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def test_index_json(self):
# Test getting limit details in JSON.
request = self._get_index_request()
request = self._populate_limits(request)
self.absolute_limits = {
'ram': 512,
'instances': 5,
'cores': 21,
'key_pairs': 10,
            'floating_ips': 10,
'security_groups': 10,
'security_group_rules': 20,
}
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [
                {
                    "regex": ".*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00Z",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
{
"verb": "POST",
"next-available": "1970-01-01T00:00:00Z",
"unit": "HOUR",
"value": 5,
"remaining": 5,
},
],
},
{
"regex": "changes-since",
"uri": "changes-since*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00Z",
"unit": "MINUTE",
"value": 5,
"remaining": 5,
},
],
},
],
"absolute": {
"maxTotalRAMSize": 512,
"maxTotalInstances": 5,
"maxTotalCores": 21,
"maxTotalKeypairs": 10,
"maxTotalFloatingIps": 10,
"maxSecurityGroups": 10,
"maxSecurityGroupRules": 20,
},
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def _populate_limits_diff_regex(self, request):
"""Put limit info into a request."""
_limits = [
limits.Limit("GET", "*", ".*", 10, 60).display(),
limits.Limit("GET", "*", "*.*", 10, 60).display(),
]
request.environ["nova.limits"] = _limits
return request
def test_index_diff_regex(self):
# Test getting limit details in JSON.
request = self._get_index_request()
request = self._populate_limits_diff_regex(request)
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [
{
"regex": ".*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00Z",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
],
},
{
"regex": "*.*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00Z",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
],
},
],
"absolute": {},
},
}
body = jsonutil
|
redhat-openstack/trove
|
trove/db/sqlalchemy/migrate_repo/versions/006_dns_records.py
|
Python
|
apache-2.0
| 1,320
| 0
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import drop_tables
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
meta = MetaData()
dns_records = Table(
'dns_records', meta,
Column('name', String(length=255), primary_key=True),
Column('record_id', String(length=64)))
def upgrade(migrate_engine):
meta.bind = migrate_engine
create_tables([dns_records])
def downgrade(migrate_engine):
meta.bind = migrate_engine
drop_tables([dns_records])
|
bobrathbone/piradio
|
rotary_class.py
|
Python
|
gpl-3.0
| 6,950
| 0.004317
|
#!/usr/bin/env python
# Raspberry Pi Rotary Encoder Class
# $Id: rotary_class.py,v 1.7 2017/01/07 11:38:47 bob Exp $
#
# Copyright 2011 Ben Buxton. Licenced under the GNU GPL Version 3.
# Contact: bb@cactii.net
# Adapted by : Bob Rathbone and Lubos Ruckl (Czech republic)
# Site : http://www.bobrathbone.com
#
# This class uses standard rotary encoder with push switch
# License: GNU V3, See https://www.gnu.org/copyleft/gpl.html
#
# Disclaimer: Software is provided as is and absolutely no warranties are implied or given.
# The authors shall not be liable for any loss or damage however caused.
#
#
# A typical mechanical rotary encoder emits a two bit gray code
# on 3 output pins. Every step in the output (often accompanied
# by a physical 'click') generates a specific sequence of output
# codes on the pins.
#
# There are 3 pins used for the rotary encoding - one common and
# two 'bit' pins.
#
# The following is the typical sequence of code on the output when
# moving from one step to the next:
#
# Position Bit1 Bit2
# ----------------------
# Step1 0 0
# 1/4 1 0
# 1/2 1 1
# 3/4 0 1
# Step2 0 0
#
# From this table, we can see that when moving from one 'click' to
# the next, there are 4 changes in the output code.
#
# - From an initial 0 - 0, Bit1 goes high, Bit0 stays low.
# - Then both bits are high, halfway through the step.
# - Then Bit1 goes low, but Bit2 stays high.
# - Finally at the end of the step, both bits return to 0.
#
# Detecting the direction is easy - the table simply goes in the other
# direction (read up instead of down).
#
# To decode this, we use a simple state machine. Every time the output
# code changes, it follows state, until finally a full steps worth of
# code is received (in the correct order). At the final 0-0, it returns
# a value indicating a step in one direction or the other.
#
# It's also possible to use 'half-step' mode. This just emits an event
# at both the 0-0 and 1-1 positions. This might be useful for some
# encoders where you want to detect all positions.
#
# If an invalid state happens (for example we go from '0-1' straight
# to '1-0'), the state machine resets to the start until 0-0 and the
# next valid codes occur.
#
# The biggest advantage of using a state machine over other algorithms
# is that this has inherent debounce built in. Other algorithms emit spurious
# output with switch bounce, but this one will simply flip between
# sub-states until the bounce settles, then continue along the state
# machine.
# A side effect of debounce is that fast rotations can cause steps to
# be skipped. By not requiring debounce, fast rotations can be accurately
# measured.
# Another advantage is the ability to properly handle bad state, such
# as due to EMI, etc.
# It is also a lot simpler than others - a static state table and less
# than 10 lines of logic.
#
import RPi.GPIO as GPIO
R_CCW_BEGIN = 0x1
R_CW_BEGIN = 0x2
R_START_M = 0x3
R_CW_BEGIN_M = 0x4
R_CCW_BEGIN_M = 0x5
# Values returned by 'process_'
# No complete step yet.
DIR_NONE = 0x0
# Clockwise step.
DIR_CW = 0x10
# Anti-clockwise step.
DIR_CCW = 0x20
R_START = 0x0
HALF_TAB = (
# R_START (00)
(R_START_M, R_CW_BEGIN, R_CCW_BEGIN, R_START),
# R_CCW_BEGIN
(R_START_M | DIR_CCW, R_START, R_CCW_BEGIN, R_START),
# R_CW_BEGIN
(R_START_M | DIR_CW, R_CW_BEGIN, R_START, R_START),
# R_START_M (11)
(R_START_M, R_CCW_BEGIN_M, R_CW_BEGIN_M, R_START),
# R_CW_BEGIN_M
(R_START_M, R_START_M, R_CW_BEGIN_M, R_START | DIR_CW),
# R_CCW_BEGIN_M
(R_START_M, R_CCW_BEGIN_M, R_START_M, R_START | DIR_CCW),
)
R_CW_FINAL = 0x1
R_CW_BEGIN = 0x2
R_CW_NEXT = 0x3
R_CCW_BEGIN = 0x4
R_CCW_FINAL = 0x5
R_CCW_NEXT = 0x6
FULL_TAB = (
# R_START
(R_START, R_CW_BEGIN, R_CCW_BEGIN, R_START),
# R_CW_FINAL
(R_CW_NEXT, R_START, R_CW_FINAL, R_START | DIR_CW),
# R_CW_BEGIN
(R_CW_NEXT, R_CW_BEGIN, R_START, R_START),
# R_CW_NEXT
(R_CW_NEXT, R_CW_BEGIN, R_CW_FINAL, R_START),
# R_CCW_BEGIN
(R_CCW_NEXT, R_START, R_CCW_BEGIN, R_START),
# R_CCW_FINAL
(R_CCW_NEXT, R_CCW_FINAL, R_START, R_START | DIR_CCW),
# R_CCW_NEXT
(R_CCW_NEXT, R_CCW_FINAL, R_CCW_BEGIN, R_START),
)
# Enable this to emit codes twice per step.
# HALF_STEP == True: emits a code at 00 and 11
# HALF_STEP == False: emits a code at 00 only
HALF_STEP = False
STATE_TAB = HALF_TAB if HALF_STEP else FULL_TAB
# State table has, for each state (row), the new state
# to set based on the next encoder output. From left to right in
# the table, the encoder outputs are 00, 01, 10, 11, and the value
# in that position is the new state to set.
class RotaryEncoder:
state = R_START
pinA = None
pinB = None
CLOCKWISE=1
ANTICLOCKWISE=2
BUTTONDOWN=3
BUTTONUP=4
def __init__(self, pinA, pinB, button,callback,revision):
self.pinA = pinA
self.pinB = pinB
self.button = button
self.callback = callback
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if revision == 1:
# For version 1 (old) boards
GPIO.setup(self.pinA, GPIO.IN)
GPIO.setup(self.pinB, GPIO.IN)
GPIO.setup(self.button, GPIO.IN)
else:
# The following lines enable the internal pull-up resistors
# on version 2 (latest) boards
GPIO.setup(self.pinA, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.pinB, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Add event detection to the GPIO inputs
GPIO.add_event_detect(self.pinA, GPIO.BOTH, callback=self.switch_event)
GPIO.add_event_detect(self.pinB, GPIO.BOTH, callback=self.switch_event)
GPIO.add_event_detect(self.button, GPIO.BOTH, callback=self.button_event, bouncetime=200)
# Call back routine called by switch events
def switch_event(self, switch):
# Grab state of input pins.
pinstate = (GPIO.input(self.pinB) << 1) | GPIO.input(self.pinA)
# Determine new state from the pins and state table.
self.state = STATE_TAB[self.state & 0xf][pinstate]
# Return emit bits, ie the generated event.
result = self.state & 0x30
if result:
event = self.CLOCKWISE if result == 32 else self.ANTICLOCKWISE
self.callback(event)
#print "Return: ",self.state & 0x30
#return self.state & 0x30
# Push button up event
def button_event(self,button):
if GPIO.input(button):
event = self.BUTTONUP
else:
event = self.BUTTONDOWN
self.callback(event)
return
# Get a switch state
def getSwitchState(self, switch):
return GPIO.input(switch)
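# Example usage sketch: the GPIO numbers below (14, 15, 25) are placeholders
# for the encoder A/B outputs and the push switch, so change them to match
# the actual wiring; revision=2 selects the internal pull-ups of newer boards.
if __name__ == "__main__":
    import time
    def knob_event(event):
        if event == RotaryEncoder.CLOCKWISE:
            print("Clockwise step")
        elif event == RotaryEncoder.ANTICLOCKWISE:
            print("Anti-clockwise step")
        elif event == RotaryEncoder.BUTTONDOWN:
            print("Button pressed")
    encoder = RotaryEncoder(14, 15, 25, knob_event, 2)
    # Keep the process alive; events arrive via the GPIO callbacks above.
    while True:
        time.sleep(1)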
|
nexlab/domotikad
|
domotika/mediasources/modules/OpenPLI/__init__.py
|
Python
|
gpl-3.0
| 1,420
| 0.002113
|
###########################################################################
# Copyright (c) 2011-2014 Unixmedia S.r.l. <info@unixmedia.it>
# Copyright (c) 2011-2014 Franco (nextime) Lanza <franco@unixmedia.it>
#
# Domotika System Controller Daemon "domotikad" [http://trac.unixmedia.it]
#
# This file is part of domotikad.
#
# domotikad is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# This is a twisted plugin directory
try:
from twisted.plugin import pluginPackagePaths
__path__.extend(pluginPackagePaths(__name__))
except ImportError:
# Twisted 2.5 doesn't include pluginPackagePaths
import sys, os
__path__.extend([os.path.abspath(os.path.join(x, 'mediasources', 'modules', 'OpenPLI'))
for x in sys.path])
__all__ = []
|
jirikuncar/invenio-formatter
|
invenio_formatter/registry.py
|
Python
|
gpl-2.0
| 3,187
| 0.000314
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
"""Implement registries for formatter."""
import os
from flask_registry import (
ModuleAutoDiscoveryRegistry,
PkgResourcesDirDiscoveryRegistry,
RegistryProxy,
)
from invenio.ext.registry import ModuleAutoDiscoverySubRegistry
from invenio.utils.datastructures import LazyDict
import yaml
format_templates_directories = RegistryProxy(
'format_templates_directories',
ModuleAutoDiscoveryRegistry,
'format_templates'
)
format_templates = RegistryProxy(
'format_templates',
PkgResourcesDirDiscoveryRegistry,
'.', registry_namespace=format_templates_directories
)
output_formats_directories = RegistryProxy(
'output_formats_directories',
ModuleAutoDiscoveryRegistry,
'output_formats'
)
output_formats_files = RegistryProxy(
'output_formats_files',
PkgResourcesDirDiscoveryRegistry,
'.', registry_namespace=output_formats_directories
)
template_context_functions = RegistryProxy(
'template_context_functions',
ModuleAutoDiscoverySubRegistry,
'template_context_functions'
)
def create_format_templates_lookup():
"""Create format templates."""
out = {}
def _register(path, level=1):
if level > 4:
return
normpath = os.path.normpath(path)
if os.path.isdir(normpath):
for p in os.listdir(normpath):
_register(os.path.join(normpath, p), level=level+1)
else:
parts = normpath.split(os.path.sep)
out[os.path.sep.join(parts[-level:])] = normpath
|
for t in reversed(format_templates):
_register(t)
return out
format_templates_look
|
up = LazyDict(create_format_templates_lookup)
def create_output_formats_lookup():
"""Create output formats."""
out = {}
for f in output_formats_files:
of = os.path.basename(f).lower()
data = {'names': {}}
if of.endswith('.yml'):
of = of[:-4]
with open(f, 'r') as f:
data.update(yaml.load(f) or {})
data['code'] = of
else:
continue # unknown filetype
if of in out:
continue
out[of] = data
return out
output_formats = LazyDict(create_output_formats_lookup)
export_formats = LazyDict(lambda: dict(
(code, of) for code, of in output_formats.items()
if of.get('content_type', '') != 'text/html' and of.get('visibility', 0)
))
|
Donkyhotay/MoonPy
|
zope/app/publication/publicationtraverse.py
|
Python
|
gpl-3.0
| 3,091
| 0.001941
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Publication Traverser
$Id: publicationtraverse.py 67630 2006-04-27 00:54:03Z jim $
"""
__docformat__ = 'restructuredtext'
from types import StringTypes
from zope.component import queryMultiAdapter
from zope.publisher.interfaces import NotFound
from zope.security.checker import ProxyFactory
from zope.traversing.namespace import namespaceLookup
from zope.traversing.namespace import nsParse
from z
|
ope.traversing.interfaces import TraversalError
from zope.publisher.interfaces import IPublishTraverse
class DuplicateNamespaces(Exception):
"""More than one namespace was specified in a request"""
class UnknownNamespace(Exception):
"""A
|
parameter specified an unknown namespace"""
class PublicationTraverse(object):
def traverseName(self, request, ob, name):
nm = name # the name to look up the object with
if name and name[:1] in '@+':
# Process URI segment parameters.
ns, nm = nsParse(name)
if ns:
try:
ob2 = namespaceLookup(ns, nm, ob, request)
except TraversalError:
raise NotFound(ob, name)
return ProxyFactory(ob2)
if nm == '.':
return ob
if IPublishTraverse.providedBy(ob):
ob2 = ob.publishTraverse(request, nm)
else:
# self is marker
adapter = queryMultiAdapter((ob, request), IPublishTraverse,
default=self)
if adapter is not self:
ob2 = adapter.publishTraverse(request, nm)
else:
raise NotFound(ob, name, request)
return ProxyFactory(ob2)
class PublicationTraverser(PublicationTraverse):
def traversePath(self, request, ob, path):
if isinstance(path, StringTypes):
path = path.split('/')
if len(path) > 1 and not path[-1]:
# Remove trailing slash
path.pop()
else:
path = list(path)
# Remove single dots
path = [x for x in path if x != '.']
path.reverse()
# Remove double dots
while '..' in path:
l = path.index('..')
if l < 0 or l+2 > len(path):
break
del path[l:l+2]
pop = path.pop
while path:
name = pop()
ob = self.traverseName(request, ob, name)
return ob
|
kodi-czsk/plugin.video.hejbejse.tv
|
resources/lib/hejbejse.py
|
Python
|
gpl-2.0
| 2,611
| 0.03447
|
# -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
|
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
|
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
|
cg31/tensorflow
|
tensorflow/python/kernel_tests/shape_ops_test.py
|
Python
|
apache-2.0
| 19,895
| 0.010003
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for various tensorflow.ops.tf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return ops.SparseTensor(
indices=x_indices, values=x_values, shape=x_shape), len(x_values)
class ShapeOpsTest(tf.test.TestCase):
def _compareShape(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu):
|
tf_ans = tf.shape(x)
tf_ans_64 = tf.shape(x
|
, out_type=tf.int64)
result = tf_ans.eval()
result_64 = tf_ans_64.eval()
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeSparse(self, x_np, use_gpu=False):
np_ans = np.array(np.shape(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.shape(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeN(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu) as sess:
tf_ans = tf.shape_n([x, x, x])
tf_ans_64 = tf.shape_n([x, x, x], out_type=tf.int64)
result = sess.run(tf_ans)
result_64 = sess.run(tf_ans_64)
for i in range(3):
self.assertAllEqual(np_ans, result[i])
self.assertAllEqual(np_ans, result_64[i])
self.assertShapeEqual(np_ans, tf_ans[i])
def _compareRank(self, x, use_gpu=False):
np_ans = np.asarray(np.ndim(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.rank(x)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareRankSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.ndim(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.rank(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSize(self, x, use_gpu=False):
np_ans = np.asarray(np.size(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.size(x)
result = tf_ans.eval()
tf_ans_64 = tf.size(x, out_type=tf.int64)
result_64 = tf_ans_64.eval()
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSizeSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.size(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.size(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _testCpu(self, x):
self._compareShape(x, use_gpu=False)
self._compareShapeN(x, use_gpu=False)
self._compareRank(x, use_gpu=False)
self._compareSize(x, use_gpu=False)
self._compareShapeSparse(x, use_gpu=False)
self._compareRankSparse(x, use_gpu=False)
self._compareSizeSparse(x, use_gpu=False)
def _testGpu(self, x):
self._compareShape(x, use_gpu=True)
self._compareShapeN(x, use_gpu=True)
self._compareRank(x, use_gpu=True)
self._compareSize(x, use_gpu=True)
self._compareShapeSparse(x, use_gpu=True)
self._compareRankSparse(x, use_gpu=True)
self._compareSizeSparse(x, use_gpu=True)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testBasic(self):
self._testAll(np.random.randn(2))
self._testAll(np.random.randn(2, 3))
self._testAll(np.random.randn(2, 3, 5))
self._testAll(np.random.randn(2, 3, 5, 7))
self._testAll(np.random.randn(2, 3, 5, 7, 11))
self._testAll(np.random.randn(2, 3, 5, 7, 11, 13))
# Disabled because it takes too long to run, but manually verified
# as passing at time of writing.
def _test64BitOutput(self):
with self.test_session():
inp = tf.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=tf.int64)
self.assertEqual(2**31, num_elements.eval())
# Too large for tf.int32 output.
with self.assertRaises(tf.errors.InvalidArgumentError):
with self.test_session():
inp = tf.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=tf.int32)
self.assertEqual(2**31, num_elements.eval())
def _compareExpandDims(self, x, dim, use_gpu):
np_ans = np.expand_dims(x, axis=dim)
with self.test_session(use_gpu=use_gpu):
tensor = tf.expand_dims(x, dim)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareExpandDimsAll(self, x, dim):
self._compareExpandDims(x, dim, False)
self._compareExpandDims(x, dim, True)
def testExpandDims(self):
self._compareExpandDimsAll(np.zeros([2]), 0)
self._compareExpandDimsAll(np.zeros([2]), 1)
self._compareExpandDimsAll(np.zeros([2]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), 0)
self._compareExpandDimsAll(np.zeros([2, 3]), 1)
self._compareExpandDimsAll(np.zeros([2, 3]), 2)
self._compareExpandDimsAll(np.zeros([2, 3]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)
def testExpandDimsErrors(self):
with self.test_session():
self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), -5)
self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), 4)
def testExpandDimsGradient(self):
with self.test_session():
inp = tf.constant(np.random.rand(4, 2).astype("f"),
dtype=tf.float32)
squeezed = tf.expand_dims(inp, 1)
err = tf.test.compute_gradient_error(inp, [4, 2], squeezed, [4, 1, 2])
self.assertLess(err, 1e-3)
def testExpandDimsScalar(self):
with self.test_session():
inp = tf.constant(7)
self.assertAllEqual([7], tf.expand_dims(inp, 0).eval())
self.assertAllEqual([7], tf.expand_dims(inp, -1).eval())
def _compareSqueeze(self, x, squeeze_dims, use_gpu):
with self.test_session(use_gpu=use_gpu):
if squeeze_dims:
np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
tensor = tf.squeeze(x, squeeze_dims)
tf_ans = tensor.eval()
else:
np_ans = np.squeeze(x)
tensor = tf.squeeze(x)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueezeAll(self, x, squeeze_dims=None):
if squeeze_dims is None:
squeeze_dims = []
self._compareSqueeze(x
|
honnibal/spaCy
|
spacy/lang/ur/__init__.py
|
Python
|
mit
| 461
| 0.002169
|
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_SUFFIXES
from ...language impo
|
rt Language, BaseDefaults
class UrduDefaults(BaseDefaults):
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}
class Urdu(Language):
lang = "ur"
Defaults = UrduDefaults
__a
|
ll__ = ["Urdu"]
|
alejob/mdanalysis
|
package/MDAnalysis/migration/test_dummy_old_MDA_code.py
|
Python
|
gpl-2.0
| 6,334
| 0.018945
|
import MDAnalysis
from MDAnalysis.tests.datafiles import GRO, XTC
universe = MDAnalysis.Universe(GRO, XTC)
#old selection
all_selection = universe.selectAtoms('all')
#additional old selectAtoms selection (this comment shouldn't be modified despite containing the method name)
all_selection.selectAtoms('bynum 1:10')
#testing atomgroup methods to properties (and exclusion of comments from conversion):
#all_selection.residues()
all_selection.residues()
#all_selection.charges()
all_selection.charges()
#all_selection.indices()
all_selection.indices()
#all_selection.masses()
all_selection.masses()
#all_selection.names()
all_selection.names()
#all_selection.types()
all_selection.types()
#all_selection.radii()
all_selection.radii()
#all_selection.resids()
all_selection.resids()
#all_selection.resnames()
all_selection.resnames()
#all_selection.resnums()
all_selection.resnums()
#all_selection.segids()
all_selection.segids()
#similarly for atomgroup count method renaming:
#all_selection.numberOfAtoms()
all_selection.numberOfAtoms()
#all_selection.numberOfResidues()
all_selection.numberOfResidues()
#all_selection.numberOfSegments()
all_selection.numberOfSegments()
#for old import statements:
#import MDAnalysis.KDTree
import MDAnalysis.KDTree
#from MDAnalysis import KDTree
from MDAnalysis import KDTree
#import MDAnalysis.core.transformations
import MDAnalysis.core.transformations
#from MDAnalysis.core import transformations
from MDAnalysis.core import transformations
#import MDAnalysis.core.util
import MDAnalysis.core.util
#from MDAnalysis.core import util
from MDAnalysis.core import util
#import MDAnalysis.core.log
import MDAnalysis.core.log
#from MDAnalysis.core import log
from MDAnalysis.core import log
#import MDAnalysis.core.units
import MDAnalysis.core.units
#from MDAnalysis.core import units
from MDAnalysis.core import units
#import MDAnalysis.core.distances
import MDAnalysis.core.distances
#from MDAnalysis.core import distances
from MDAnalysis.core import distances
#import MDAnalysis.core.parallel
import MDAnalysis.core.parallel
#from MDAnalysis.core import parallel
from MDAnalysis.core import parallel
# These methods are now properties returning an object
#AtomGroup.bond() -> AtomGroup.bond.value()
AtomGroup.bond()
#AtomGroup.angle() -> AtomGroup.angle.value()
AtomGroup.angle()
#AtomGroup.torsion() -> AtomGroup.dihedral.value()
AtomGroup.torsion()
#AtomGroup.improper() -> AtomGroup.improper.value()
AtomGroup.improper()
#atomgroup, atom and universe torsion to dihedral conversions
#AtomGroup.torsions -> AtomGroup.dihedrals
AtomGroup.torsions
#Atom.torsions -> Atom.dihedrals
Atom.torsions
#Universe.torsions -> Universe.dihedrals
Universe.torsions
#camelcase fixes
# from core.AtomGroup
#totalMass -> total_mass
ag.totalMass
#totalCharge -> total_charge
ag.totalCharge
#centerOfGeometry -> center_of_geometry
ag.centerOfGeometry
#centerOfMass -> center_of_mass
ag.centerOfMass
#radiusOfGyration -> radius_of_gyration
ag.radiusOfGyration
#shapeParameter -> shape_parameter
ag.shapeParameter
#momentOfInertia -> moment_of_inertia
ag.momentOfInertia
#principalAxes -> principal_axes
ag.principalAxes
#packIntoBox -> pack_into_box
ag.packIntoBox
#asUniverse -> as_universe
ag.asUniverse
#align_principalAxis -> align_principal_axis
ag.align_principalAxis
# from lib.distances
#applyPBC -> apply_PBC
lib.distances.applyPBC
#frame_count = universe.trajectory.numframes
frame_count = universe.trajectory.numframes
traj = universe.trajectory
#frame_count = traj.numframes
frame_count = traj.numframes
# From MDAnalysis.lib.distances
#calc_torsions() -> calc_dihedr
|
als()
#from MDAnalysis.lib.distances import calc_torsions
from MDAnalysis.lib.distances import calc_torsions
#MDAnalysis.lib.distances.calc_torsions()
MDAnalysis.lib.distances.calc_torsions()
result = MDAnalysis.lib.distances.calc_torsions()
#dist.calc_torsions()
dist
|
.calc_torsions()
#atomgroup method pluralizations
#set_mass(new) --> set_masses(new)
ag.set_mass(new)
#set_charge(new) --> set_charges(new)
ag.set_charge(new)
#set_name(new) --> set_names(new)
ag.set_name(new)
#set_type(new) --> set_types(new)
ag.set_type(new)
#set_radius(new) --> set_radii(new)
ag.set_radius(new)
#set_bfactor(new) --> set_bfactors(new)
ag.set_bfactor(new)
#set_altloc(new) --> set_altlocs(new)
ag.set_altloc(new)
#set_serial(new) --> set_serials(new)
ag.set_serial(new)
#set_resid(new) --> set_resids(new)
ag.set_resid(new)
#set_resname(new) --> set_resnames(new)
ag.set_resname(new)
#set_resnum(new) --> set_resnums(new)
ag.set_resnum(new)
#set_segid(new) --> set_segids(new)
ag.set_segid(new)
#this test case has caused issues:
g.set_resid(resid * np.ones(len(g)))
#frame numbering is now 0-based:
#ts.frame - 1 -> ts.frame - 0
ts.frame - 1
#ts.frame + 2 -> ts.frame + 3
ts.frame + 2
#ts.frame == 3 -> ts.frame == 2
ts.frame == 3
#ts.frame != 5 -> ts.frame != 4
ts.frame != 5
#another
ts.frame = 9
#+1
[ts.frame for ts in self.trajectory[2:9:3]]
#+1
[ts.frame for ts in self.trajectory]
assert_equal(self.ts.frame, 1, "rewinding to frame 1")
#decoy comment
assert_almost_equal(ts.frame, 544)
assert_almost_equal(ts.dummy, 544)
#frame warning with indentation complexity:
class Dummy(object):
assert_almost_equal(ts.frame, 544)
ts.frame = 77
#numatoms to n_atoms keyword argument conversion while preserving the conversion from numberOfAtoms() to n_atoms as well:
with MDAnalysis.Writer(pdbtrj, multiframe=True, bonds=False, numatoms=u.atoms.numberOfAtoms()) as PDB:
pass
#alternative call syntax:
with MDAnalysis.coordinates.core.writer(pdbtrj, multiframe=True, bonds=False, numatoms=u.atoms.numberOfAtoms()) as PDB:
pass
#the above fix should be specific to .writer or .Writer, so the following should not be recognized (as a probe for specificity) from the keyword argument standpoint [method replacement is ok]:
with MDAnalysis.coordinates.core.writerr(pdbtrj, multiframe=True, bonds=False, numatoms=u.atoms.numberOfAtoms()) as PDB:
pass
#however, the fixer should be sufficiently flexible to recognize a different input filename, the omission of default arguments, spacing between 'numatoms' and '=', and an explicit integer value for numatoms, along with some additional kwargs:
with MDAnalysis.Writer(other_filename, numatoms = 55, start = 0, step = 2) as GRO:
pass
|
gongleiarei/qemu
|
scripts/qapi-introspect.py
|
Python
|
gpl-2.0
| 7,230
| 0.000138
|
#
# QAPI introspection generator
#
# Copyright (C) 2015-2016 Red Hat, Inc.
#
# Authors:
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from qapi import *
# Caveman's json.dumps() replacement (we're stuck at Python 2.4)
# TODO try to use json.dumps() once we get unstuck
def to_json(obj, level=0):
if obj is None:
ret = 'null'
elif isinstance(obj, str):
ret = '"' + obj.replace('"', r'\"') + '"'
elif isinstance(obj, list):
elts = [to_json(elt, level + 1)
for elt in obj]
ret = '[' + ', '.join(elts) + ']'
elif isinstance(obj, dict):
elts = ['"%s": %s' % (key.replace('"', r'\"'),
to_json(obj[key], level + 1))
for key in sorted(obj.keys())]
ret = '{' + ', '.join(elts) + '}'
else:
assert False # not implemented
if level == 1:
ret = '\n' + ret
return ret
def to_c_string(string):
return '"' + string.replace('\\', r'\\').replace('"', r'\"') + '"'
class QAPISchemaGenIntrospectVisitor(QAPISchemaVisitor):
def __init__(self, unmask):
self._unmask = unmask
self.defn = None
self.decl = None
self._schema = None
self._jsons = None
self._used_types = None
self._name_map = None
def visit_begin(self, schema):
self._schema = schema
self._jsons = []
self._used_types = []
self._name_map = {}
def visit_end(self):
# visit the types that are actually used
jsons = self._jsons
s
|
elf._jsons = []
for typ in self._used_types:
typ.visit(self)
# generate C
# TODO can generate awfully long lines
jsons.extend(self._jsons)
name = prefix + 'qmp_schema_json'
self.decl = mcgen('''
extern const char %(c_name)s[];
''',
c_name=c_name(name))
lines = to_json(jsons).split('\n')
c_string = '\n '.join([to_c_string(line) f
|
or line in lines])
self.defn = mcgen('''
const char %(c_name)s[] = %(c_string)s;
''',
c_name=c_name(name),
c_string=c_string)
self._schema = None
self._jsons = None
self._used_types = None
self._name_map = None
def visit_needed(self, entity):
# Ignore types on first pass; visit_end() will pick up used types
return not isinstance(entity, QAPISchemaType)
def _name(self, name):
if self._unmask:
return name
if name not in self._name_map:
self._name_map[name] = '%d' % len(self._name_map)
return self._name_map[name]
def _use_type(self, typ):
# Map the various integer types to plain int
if typ.json_type() == 'int':
typ = self._schema.lookup_type('int')
elif (isinstance(typ, QAPISchemaArrayType) and
typ.element_type.json_type() == 'int'):
typ = self._schema.lookup_type('intList')
# Add type to work queue if new
if typ not in self._used_types:
self._used_types.append(typ)
# Clients should examine commands and events, not types. Hide
# type names to reduce the temptation. Also saves a few
# characters.
if isinstance(typ, QAPISchemaBuiltinType):
return typ.name
if isinstance(typ, QAPISchemaArrayType):
return '[' + self._use_type(typ.element_type) + ']'
return self._name(typ.name)
def _gen_json(self, name, mtype, obj):
if mtype not in ('command', 'event', 'builtin', 'array'):
name = self._name(name)
obj['name'] = name
obj['meta-type'] = mtype
self._jsons.append(obj)
def _gen_member(self, member):
ret = {'name': member.name, 'type': self._use_type(member.type)}
if member.optional:
ret['default'] = None
return ret
def _gen_variants(self, tag_name, variants):
return {'tag': tag_name,
'variants': [self._gen_variant(v) for v in variants]}
def _gen_variant(self, variant):
return {'case': variant.name, 'type': self._use_type(variant.type)}
def visit_builtin_type(self, name, info, json_type):
self._gen_json(name, 'builtin', {'json-type': json_type})
def visit_enum_type(self, name, info, values, prefix):
self._gen_json(name, 'enum', {'values': values})
def visit_array_type(self, name, info, element_type):
element = self._use_type(element_type)
self._gen_json('[' + element + ']', 'array', {'element-type': element})
def visit_object_type_flat(self, name, info, members, variants):
obj = {'members': [self._gen_member(m) for m in members]}
if variants:
obj.update(self._gen_variants(variants.tag_member.name,
variants.variants))
self._gen_json(name, 'object', obj)
def visit_alternate_type(self, name, info, variants):
self._gen_json(name, 'alternate',
{'members': [{'type': self._use_type(m.type)}
for m in variants.variants]})
def visit_command(self, name, info, arg_type, ret_type,
gen, success_response, boxed):
arg_type = arg_type or self._schema.the_empty_object_type
ret_type = ret_type or self._schema.the_empty_object_type
self._gen_json(name, 'command',
{'arg-type': self._use_type(arg_type),
'ret-type': self._use_type(ret_type)})
def visit_event(self, name, info, arg_type, boxed):
arg_type = arg_type or self._schema.the_empty_object_type
self._gen_json(name, 'event', {'arg-type': self._use_type(arg_type)})
# Debugging aid: unmask QAPI schema's type names
# We normally mask them, because they're not QMP wire ABI
opt_unmask = False
(input_file, output_dir, do_c, do_h, prefix, opts) = \
parse_command_line("u", ["unmask-non-abi-names"])
for o, a in opts:
if o in ("-u", "--unmask-non-abi-names"):
opt_unmask = True
c_comment = '''
/*
* QAPI/QMP schema introspection
*
* Copyright (C) 2015 Red Hat, Inc.
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
h_comment = '''
/*
* QAPI/QMP schema introspection
*
* Copyright (C) 2015 Red Hat, Inc.
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
'qmp-introspect.c', 'qmp-introspect.h',
c_comment, h_comment)
fdef.write(mcgen('''
#include "qemu/osdep.h"
#include "%(prefix)sqmp-introspect.h"
''',
prefix=prefix))
schema = QAPISchema(input_file)
gen = QAPISchemaGenIntrospectVisitor(opt_unmask)
schema.visit(gen)
fdef.write(gen.defn)
fdecl.write(gen.decl)
close_output(fdef, fdecl)
|
openstack/diskimage-builder
|
diskimage_builder/elements/package-installs/tests/test_package_squash.py
|
Python
|
apache-2.0
| 7,323
| 0
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import imp
import mock
import os
from oslotest import base
from testtools.matchers import Mismatch
installs_squash_src = (os.path.dirname(os.path.realpath(__file__)) +
'/../bin/package-installs-squash')
installs_squash = imp.load_source('installs_squash', installs_squash_src)
class IsMatchingInstallList(object):
def __init__(self, expected):
self.expected = expected
def match(self, actual):
for phase, ops in self.expected.items():
if phase not in actual:
# missing the phase
return Mismatch(
"Phase %d does not exist in %s" % (phase, actual))
for op, pkgs in ops.items():
if op not in actual[phase]:
# missing op (install/uninstall)
return Mismatch(
"Operation %s does not exist in %s" % (op, ops))
# on py2 these can be out of order, we just want a match
expected_phase_ops = sorted(self.expected[phase][op])
actual_phase_ops = sorted(actual[phase][op])
if expected_phase_ops != actual_phase_ops:
return Mismatch(
"Operation list %s does not match expected %s" %
(actual[phase][op], self.expected[phase][op]))
class TestPackageInstall(base.BaseTestCase):
def setUp(self):
super(TestPackageInstall, self).setUp()
self.final_dict = collections.defaultdict(
functools.partial(collections.defaultdict, list))
def test_simple(self):
'''Test a basic package install'''
objs = {
'test_package': ''
}
result = installs_squash.collect_data(
self.final_dict, objs, 'test_element')
expected = {
'install.d': {
'install': [('test_package', 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
@mock.patch.object(os, 'environ', dict(ARCH='arm64'))
def test_arch(self):
'''Exercise the arch and not-arch flags'''
objs = {
'test_package': '',
'test_arm64_package': {
'arch': 'arm64'
},
'do_not_install': {
'not-arch': 'arm64'
}
}
result = instal
|
ls_squash.collect_data(
self.final_dict, objs, 'test_element')
expected = {
'install.d': {
'install': [('test_package', 'test_element'),
('test_arm64_package', 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
kernel_objs = {
'linux-image-generic': [
{
'not-arch': 'arm64',
'when': '
|
DIB_UBUNTU_KERNEL = linux-image-generic',
},
{
'arch': 'arm64',
'when': (
'DIB_RELEASE != xenial',
'DIB_UBUNTU_KERNEL = linux-image-generic',
)
},
],
'linux-generic-hwe-16.04': {
'arch': 'arm64',
'when': (
'DIB_RELEASE = xenial',
'DIB_UBUNTU_KERNEL = linux-image-generic',
)
},
}
def _test_kernel_objs_match(self, arch, release, expected):
with mock.patch.object(os, 'environ',
dict(ARCH=arch,
DIB_UBUNTU_KERNEL='linux-image-generic',
DIB_RELEASE=release)):
result = installs_squash.collect_data(
self.final_dict, self.kernel_objs, 'test_element')
expected = {
'install.d': {
'install': [(expected, 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
def test_param_list_x86(self):
self._test_kernel_objs_match('x86_64', 'focal', 'linux-image-generic')
def test_param_list_arm64_xenial(self):
self._test_kernel_objs_match('arm64', 'xenial',
'linux-generic-hwe-16.04')
def test_param_list_arm64_focal(self):
self._test_kernel_objs_match('arm64', 'focal', 'linux-image-generic')
@mock.patch.object(os, 'environ', dict(DIB_FEATURE='1', **os.environ))
def test_skip_when(self):
'''Exercise the when flag'''
objs = {
'skipped_package': {
'when': 'DIB_FEATURE=0'
},
'not_skipped_package': {
'when': 'DIB_FEATURE=1'
},
'not_equal_package': {
'when': 'DIB_FEATURE!=0'
},
'not_equal_skipped_package': {
'when': 'DIB_FEATURE!=1'
},
}
result = installs_squash.collect_data(
self.final_dict, objs, 'test_element')
expected = {
'install.d': {
'install': [('not_skipped_package', 'test_element'),
('not_equal_package', 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
def test_skip_no_var(self):
'''Exercise the skip_when missing variable failure case'''
objs = {
'package': {
'when': 'MISSING_VAR=1'
},
}
self.assertRaises(RuntimeError, installs_squash.collect_data,
self.final_dict, objs, 'test_element')
@mock.patch.object(os, 'environ',
dict(
DIB_A_FEATURE='1',
DIB_B_FEATURE='1',
DIB_C_FEATURE='1'))
def test_skip_when_list(self):
'''Exercise the when flag with lists'''
objs = {
'not_skipped_package': {
'when': [
'DIB_A_FEATURE=1',
'DIB_B_FEATURE=1',
'DIB_C_FEATURE=1'
]
},
'skipped_package': {
'when': [
'DIB_A_FEATURE=1',
'DIB_B_FEATURE=0',
'DIB_C_FEATURE=1',
]
},
}
result = installs_squash.collect_data(
self.final_dict, objs, 'test_element')
expected = {
'install.d': {
'install': [('not_skipped_package', 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
|
ITDevLtd/MCVirt
|
source/mcvirt-daemon/usr/lib/python2.7/dist-packages/mcvirt/parser_modules/virtual_machine/delete_parser.py
|
Python
|
gpl-2.0
| 2,888
| 0.003809
|
"""Provides VM delete parser."""
# Copyright (c) 2018 - I.T. Dev Ltd
#
# This file is part of MCVirt.
#
# MCVirt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# MCVirt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MCVirt. If not, see <http://www.gnu.org/licenses/>
from argparse import SUPPRESS
class DeleteParser(object):
"""Handle VM delete parser."""
def __init__(self, subparser, parent_parser):
"""Create subparser for deleting VMs."""
self.parent_subparser = subparser
self.parent_parser = parent_parser
# Get arguments for deleting a VM
self.delete_parser = self.parent_subparser.add_parser(
'delete', help='Delete VM', parents=[self.parent_parser])
self.delete_parser.set_defaults(func=self.handle_delete)
        # This argument is deprecated; this is now default functionality, replaced
# with --keep-data and --keep-config
self.delete_parser.add_argument('--delete-data', dest='delete_data', action='store_true',
help=SUPPRESS)
self.delete_parser.add_argument('--keep-config', dest='keep_config', action='store_true',
help=('Keeps the VM configuration directory\n'
'Note: A new VM cannot be created with '
'the same name until this directory '
'is removed'))
self.delete_parser.add_argument('--keep-disks', dest='keep_disks', action='store_true',
help=('Keeps the VM hard drives '
'(files on disk or logical volume)\n'
'Note: A new VM cannot be created with '
|
'the same name until this directory '
'is removed'))
self.delete_parser.add_argument('vm_name', metavar='VM Name', type=str, help='Name of VM')
def handle_delete(self, p_, args):
"""Handle delete."""
vm_factory = p_.rpc.get_connection('virtual_machine_factory')
vm_object = vm_factory.get_
|
virtual_machine_by_name(args.vm_name)
p_.rpc.annotate_object(vm_object)
vm_object.delete(keep_disks=args.keep_disks,
keep_config=args.keep_config)
|
lucidbard/requests-oauthlib
|
requests_oauthlib/oauth2_session.py
|
Python
|
isc
| 14,834
| 0.00209
|
from __future__ import unicode_literals
import logging
from oauthlib.common import generate_token, urldecode
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import TokenExpiredError, is_secure_transport
import requests
log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)
class TokenUpdated(Warning):
def __init__(self, token):
super(TokenUpdated, self).__init__()
self.token = token
class OAuth2Session(requests.Session):
"""Versatile OAuth 2 extension to :class:`requests.Session`.
Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec
including the four core OAuth 2 grants.
Can be used to create authorization urls, fetch tokens and access protected
resources using the :class:`requests.Session` interface you are used to.
- :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant
- :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant
- :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant
- :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant
Note that the only time you will be using Implicit Grant from python is if
you are driving a user agent able to obtain URL fragments.
"""
def __init__(self, client_id=None, client=None, auto_refresh_url=None,
auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,
state=None, token_updater=None, **kwargs):
"""Construct a new OAuth 2 client session.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param scope: List of scopes you wish to request access to
:param redirect_uri: Redirect URI you registered as callback
:param token: Token dictionary, must include access_token
and token_type.
:param state: State string used to prevent CSRF. This will be given
when creating the authorization url and must be supplied
when parsing the authorization response.
Can be either a string or a no argument callable.
:auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply
this if you wish the client to automatically refresh
your access tokens.
:auto_refresh_kwargs: Extra arguments to pass to the refresh token
endpoint.
:token_updater: Method with one argument, token, to be used to update
                        your token database on automatic token refresh. If not
set a Toke
|
nUpdated warning will be raised when a token
has been refreshed. This warning will carry the token
in its token argument.
:param kwargs: Arguments to pass to the Session constructor.
"""
super(OAuth2Session, self).__init__(**kwargs)
self.client_id = client_id
if client is not None and
|
not self.client_id:
self.client_id = client.client_id
self.scope = scope
self.redirect_uri = redirect_uri
self.token = token or {}
self.state = state or generate_token
self._state = state
self.auto_refresh_url = auto_refresh_url
self.auto_refresh_kwargs = auto_refresh_kwargs or {}
self.token_updater = token_updater
self._client = client or WebApplicationClient(client_id, token=token)
self._client._populate_attributes(token or {})
# Allow customizations for non compliant providers through various
# hooks to adjust requests and responses.
self.compliance_hook = {
'access_token_response': set([]),
'refresh_token_response': set([]),
'protected_request': set([]),
}
def new_state(self):
"""Generates a state string to be used in authorizations."""
try:
self._state = self.state()
log.debug('Generated new state %s.', self._state)
except TypeError:
self._state = self.state
log.debug('Re-using previously supplied state %s.', self._state)
return self._state
@property
def authorized(self):
"""Boolean that indicates whether this session has an OAuth token
or not. If `self.authorized` is True, you can reasonably expect
OAuth-protected requests to the resource to succeed. If
`self.authorized` is False, you need the user to go through the OAuth
authentication dance before OAuth-protected requests to the resource
will succeed.
"""
return bool(self._client.access_token)
def authorization_url(self, url, state=None, **kwargs):
"""Form an authorization URL.
:param url: Authorization endpoint url, must be HTTPS.
:param state: An optional state string for CSRF protection. If not
given it will be generated for you.
:param kwargs: Extra parameters to include.
:return: authorization_url, state
"""
state = state or self.new_state()
return self._client.prepare_request_uri(url,
redirect_uri=self.redirect_uri,
scope=self.scope,
state=state,
**kwargs), state
def fetch_token(self, token_url, code=None, authorization_response=None,
body='', auth=None, username=None, password=None, method='POST',
timeout=None, headers=None, verify=True, **kwargs):
"""Generic method for fetching an access token from the token endpoint.
If you are using the MobileApplicationClient you will want to use
token_from_fragment instead of fetch_token.
:param token_url: Token endpoint URL, must use HTTPS.
:param code: Authorization code (used by WebApplicationClients).
:param authorization_response: Authorization response URL, the callback
URL of the request back to you. Used by
WebApplicationClients instead of code.
        :param body: Optional application/x-www-form-urlencoded body to
                     include in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by requests.
:param username: Username used by LegacyApplicationClients.
:param password: Password used by LegacyApplicationClients.
:param method: The HTTP method used to make the request. Defaults
to POST, but may also be GET. Other methods should
be added as needed.
:param headers: Dict to default request headers with.
:param timeout: Timeout of the request in seconds.
:param verify: Verify SSL certificate.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
if not code and authorization_response:
self._client.parse_request_uri_response(authorization_response,
state=self._state)
code = self._client.code
elif not code and isinstance(self._client, WebApplicationClient):
code = self._client.code
if not code:
raise ValueError('Please supply either code or '
'authorization_code parameters.')
body = self._client.prepare_request_body(code=code, body=body,
redirect_uri=self.redirect_uri, username=username,
password=password, **kwargs)
headers = headers or {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
if method.up
|
BrunoCaimar/ArcREST
|
samples/change_folder.py
|
Python
|
apache-2.0
| 883
| 0.006795
|
"""
This sample shows how to loop through the folders
and print their titles
Python 2/3
ArcREST version 3.5.x
"""
from __future__ import print_function
from arcrest.security import AGOLTokenSecurityHandler
import arcrest
if __name__ == "__main__":
username = ""#Username
password = ""#password
proxy_port = None
proxy_url = None
agolSH = AGOLTokenSecurityHandler(username=username,
|
password=password)
admin = arcrest.manageorg.Administration(securityHandler=agolSH)
content = admin.content
user = content.users.user()
for folder in user.folders:
title = folder['tit
|
le']
print("Analyzing {}".format(title))
user.currentFolder = title
print("Current folder is {}".format(user.currentFolder))
print("Current folder has {} items".format(len(user.items)))
|
YouNeedToSleep/sleepy
|
src/sleepy/models/user.py
|
Python
|
bsd-3-clause
| 184
| 0
|
# -*- coding: utf-8 -*-
"""
slee
|
py.models.user
~~~~~~~~~~~~~~~~~~
User model.
"""
from django.contrib.auth.models import Abstract
|
User
class User(AbstractUser):
pass
|
crooks/nymserv
|
nymserv/daemon.py
|
Python
|
gpl-3.0
| 3,773
| 0.00053
|
#!/usr/bin/env python
#
# vim: tabstop=4 expandtab shiftwidth=4 noautoindent
import sys
import os
import time
import atexit
from signal import SIGTERM
class Daemon:
"""A generic daemon class.
Usage: subclass the Daemon class and override the run() method"""
def __init__(self, pidfile, stdin='/dev/null',
stdout='/dev/null',
stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""Do the UNIX double-fork magic, see Stevens' "Advanced Programming
in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
logmes = "fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)
sys.stderr.write(logmes)
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
logmes = "fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)
sys.stderr.write(logmes)
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'r')
so = open(self.stdout, 'a+')
se = open(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pf = open
|
(self.pidfile, 'w+')
pf.write("%s\n" % os.getpid())
pf.close()
def delpid(se
|
lf):
os.remove(self.pidfile)
def start(self):
"""Start the daemon"""
# Check for a pidfile to see if the daemon already runs
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exists." % self.pidfile
message += " Daemon already running?\n"
sys.stderr.write(message)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""Stop the daemon"""
# Get the pid from the pidfile
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""Restart the daemon"""
self.stop()
self.start()
def run(self):
"""You should override this method when you subclass Daemon. It will
be called after the process has been daemonized by start() or
restart()."""
|
tencrance/cool-config
|
python3/grpc/greeter_server.py
|
Python
|
mit
| 690
| 0.01462
|
from concurrent import futures
import time
import grpc
import demo_pb2
import demo_pb2_grpc
_ONE_DAY_IN_SECOND = 60 * 60 * 24
class Greeter(demo_pb2_grpc.GreeterServicer):
def SayHello(sel
|
f,request,context):
return demo_pb2.HelloReply(message='Hello, %s' % request.name)
def serve
|
():
    # gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
demo_pb2_grpc.add_GreeterServicer_to_server(Greeter(),server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECOND)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
|
opensignal/airflow
|
airflow/operators/__init__.py
|
Python
|
apache-2.0
| 2,447
| 0.000409
|
# Imports operators dynamically while keeping the package API clean,
# abstracting the underlying modules
from airflow.utils import import_module_attrs as _import_module_attrs
# These need to be integrated first as other operators depend on them
_import_module_attrs(globals(), {
'check_operator': [
'CheckOperator',
'ValueCheckOperator',
'IntervalCheckOperator',
],
})
_operators = {
'bash_operator': ['BashOperator'],
'python_operator': [
'PythonOperator',
'BranchPythonOperator',
'ShortCircuitOperator',
],
'hive_operator': ['HiveOperator'],
'pig_operator': ['PigOperator'],
'presto_check_operator': [
'PrestoCheckOperator',
'PrestoValueCheckOperator',
'PrestoIntervalCheckOperator',
],
'dagrun_operator': ['TriggerDagRunOperator'],
'dummy_operator': ['DummyOperator'],
'email_operator': ['EmailOperator'],
'hive_to_samba_operator': ['Hive2SambaOperator'],
'mysql_operator': ['MySqlOperator'],
'sqlite_operator': ['SqliteOperator'],
'mysql_to_hive': ['MySqlToHiveTransfer'],
'postgres_operator': ['PostgresOperator']
|
,
'sensors': [
'BaseSensorOperator',
'ExternalTaskSensor',
'
|
HdfsSensor',
'HivePartitionSensor',
'HttpSensor',
'MetastorePartitionSensor',
'S3KeySensor',
'S3PrefixSensor',
'SqlSensor',
'TimeDeltaSensor',
'TimeSensor',
'WebHdfsSensor',
],
'subdag_operator': ['SubDagOperator'],
'hive_stats_operator': ['HiveStatsCollectionOperator'],
's3_to_hive_operator': ['S3ToHiveTransfer'],
'hive_to_mysql': ['HiveToMySqlTransfer'],
'presto_to_mysql': ['PrestoToMySqlTransfer'],
's3_file_transform_operator': ['S3FileTransformOperator'],
'http_operator': ['SimpleHttpOperator'],
'hive_to_druid': ['HiveToDruidTransfer'],
'jdbc_operator': ['JdbcOperator'],
'mssql_operator': ['MsSqlOperator'],
'mssql_to_hive': ['MsSqlToHiveTransfer'],
'slack_operator': ['SlackAPIOperator', 'SlackAPIPostOperator'],
'generic_transfer': ['GenericTransfer'],
}
_import_module_attrs(globals(), _operators)
from airflow.models import BaseOperator
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import operators as _operators
for _operator in _operators:
globals()[_operator.__name__] = _operator
|
deepmind/open_spiel
|
open_spiel/python/games/data.py
|
Python
|
apache-2.0
| 1,333
| 0.002251
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except
|
in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, sof
|
tware
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Numerical information about some games or some specific settings of games.
TODO(author2): Ideally, this should also be available from C++.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pyspiel
def kuhn_nash_equilibrium(alpha):
"""Returns a Nash Equilibrium in Kuhn parameterized by alpha in [0, 1/3].
See https://en.wikipedia.org/wiki/Kuhn_poker#Optimal_strategy
Args:
alpha: The probability to bet on a Jack for Player 0.
Raises:
ValueError: If `alpha` is not within [0, 1/3].
"""
if not 0 <= alpha <= 1 / 3:
raise ValueError("alpha ({}) must be in [0, 1/3]".format(alpha))
return pyspiel.kuhn_poker.get_optimal_policy(alpha)
|
IODisrupt/OmegaBot
|
cogs/runescapecompare.py
|
Python
|
gpl-3.0
| 2,978
| 0.01041
|
import discord
import asyncio
import datetime
import time
import aiohttp
import threading
import glob
import re
import json
import os
import urllib.request
from discord.ext import commands
from random import randint
from random import choice as randchoice
from random import choice as rndchoice
from random import shuffle
from .utils.dataIO import fileIO
from .utils import checks
from bs4 import BeautifulSoup
class Runescapecompare:
"""Runescape-relate commands"""
def __init__(self, bot):
self.bot = bot
"""
imLink = http://services.runescape.com/m=hiscore_ironman/index_lite.ws?player=
nmLink = http://services.runescape.com/m=hiscore/index_lite.ws?player=
"""
@commands.group(name="compare", pass_context=True)
async def _compare(self, ctx):
if ctx.invoked_subcommand is None:
await self.bot.say("Please, choose a skill to compare!")
#####Overall#####
@_compare.command(name="overall", pass_context=True)
async def compare_overall(self, ctx, name1 : str, name2 : str):
address1 = "http://services.runescape.com/m=hiscore_ironman/index_lite.ws?player=" + name1
address2 = "http://services.runescape.com/m=hiscore_ironman/index_lite.ws?player=" + name2
try:
website1 = urllib.request.urlopen(address1)
website2 = urllib.request.urlopen(address2)
website_html1 = website1.read().decode(website1.headers.get_content_charset())
website_html2 = website2.read().decode(website2.headers.get_content_charset())
stats1 = website_html1.split("\n")
stats2 = website_html2.split("\n")
stat1 = stats1[0].split(",")
            stat2 = stats2[0].split(",")
if stat1[2] > stat2[2]:
comparerank = int(stat2[0]) - int(stat1[0])
comparelvl = int(stat1[1]) - int(stat2[1])
comparexp = int(stat1[2]) - int(stat2[2])
await self.bot.say("```" + name1 + "'s ranking is " + str(comparerank) + " ranks higher than " + name2 + "'s rank.\n" + name1 + "'s level is " + str(comparelvl) + " levels higher than " + name2 + "'s.\n" + name1 + "'s total experience is " + str(comparexp) + " higher than " + name2 + "'s.```")
if stat2[2] > stat1[2]:
                comparerank = int(stat2[0]) - int(stat1[0])
                comparelvl = int(stat2[1]) - int(stat1[1])
comp
|
arexp = int(stat2[2]) - int(stat1[2])
await self.bot.say("```" + name2 + "'s ranking is " + str(comparerank) + " ranks higher than " + name1 + "'s rank.\n" + name2 + "'s level is " + str(comparelvl) + " levels higher
|
than " + name1 + "'s.\n" + name2 + "'s total experience is " + str(comparexp) + " higher than " + name1 + "'s.```")
except:
await self.bot.say("Sorry... Something went wrong there. Did you type the name correctly?")
def setup(bot):
n = Runescapecompare(bot)
bot.add_cog(n)
|
neurohackweek/avalanche
|
doc/conf.py
|
Python
|
apache-2.0
| 9,704
| 0.004637
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# shablona documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 14 10:29:06 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# General information about the project.
project = 'shablona'
copyright = '2015, Ariel Rokem'
currentdir = os.path.abspath(os.path.dirname(__file__))
ver_file = os.path.join(currentdir, '..', project, 'version.py')
with open(ver_file) as f:
exec(f.read())
source_version = __version__
currentdir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(currentdir, 'tools'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0' # numpydoc requires sphinx >= 1.0
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.append(os.path.abspath('sphinxext'))
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'math_dollar', # has to go before numpydoc
'numpydoc',
'github',
'sphinx_gallery.gen_gallery']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# --- Sphinx Gallery ---
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs': '../examples',
# path where to save gallery generated examples
'gallery_dirs': 'auto_examples',
# To auto-generate example sections in the API
'doc_module': ('shablona',),
# Auto-generated mini-galleries go here
'backreferences_dir': 'gen_api'
}
# Automatically generate stub pages for API
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['localtoc.html', 'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'shablonadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'shablona.tex', 'shablona Documentation',
'Ariel Rokem', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
|
kyungkoo/gae-ota-plist-maker
|
appengine_config.py
|
Python
|
mit
| 160
| 0
|
import sys
import os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'py'))
|
DavidNorman/tensorflow
|
tensorflow/python/ops/linalg/sparse/sparse.py
|
Python
|
apache-2.0
| 1,085
| 0
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for tf.linalg.sparse namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.linalg.sparse.sparse_csr_matrix_grad import *
from tensorflow.python.ops.linalg.sparse.sparse_csr_matrix_ops import *
# pylint: enable=wildcard-import
|
jigneshvasoya/ruffus
|
ruffus/print_dependencies.py
|
Python
|
mit
| 26,918
| 0.013485
|
#!/usr/bin/env python
################################################################################
#
# print_dependencies.py
#
#
# Copyright (c) 10/9/2009 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
"""
print_dependencies.py
provides support for dependency trees
"""
#
# Number of pre-canned colour schemes
#
CNT_COLOUR_SCHEMES = 8
import types
import sys
try:
from StringIO import StringIO
except:
from io import StringIO
from .adjacent_pairs_iterate import adjacent_pairs_iterate
from collections import defaultdict
def _get_name (node):
"""
Get name for node
use display_name or _name
"""
if hasattr(node, "display_name"):
return node.display_name
elif hasattr(node, "_name"):
return node._name
else:
        raise Exception("Unknown node type [%s] has neither _name nor display_name" % str(node))
#_________________________________________________________________________________________
# Helper functions for dot format
#_________________________________________________________________________________________
def attributes_to_str (attributes, name):
"""
    helper function for dot format
    turns dictionary into a=b, c=d ...
"""
# remove ugly __main__. qualifier
name = name.replace("__main__.", "")
# if a label is specified, that overrides the node name
if "label" not in attributes:
attributes["label"] = name.replace(" before ", "\\nbefore ").replace(", ", ",\n")
# remove any quotes
if attributes["label"][0] == '<':
attributes["label"] = attributes["label"][1:-1]
html_label = True
else:
html_label = False
if attributes["label"][0] == '"':
attributes["label"] = attributes["label"][1:-1]
# add suffix / prefix
if "label_prefix" in attributes:
attributes["label"] = attributes["label_prefix"] + attributes["label"]
del attributes["label_prefix"]
if "label_suffix" in attributes:
attributes["label"] = attributes["label"] + attributes["label_suffix"]
del attributes["label_suffix"]
# restore quotes
if html_label:
attributes["label"] = '<' + attributes["label"] + '>'
else:
attributes["label"] = '"' + attributes["label"] + '"'
# support for html labels
#if "<" in name and ">" in name:
# attributes["label"] = '<' + name + '>'
#else:
# attributes["label"] = '"' + name + '"'
return "[" + ", ".join ("%s=%s" % (k,v) for k,v in sorted(attributes.items())) + "];\n"
#_________________________________________________________________________________________
#
# get_arrow_str_for_legend_key
#_________________________________________________________________________________________
def get_arrow_str_for_legend_key (from_task_type, to_task_type, n1, n2, colour_scheme):
"""
Get dot format for arrows inside legend key
"""
if "Vicious cycle" in (from_task_type, to_task_type):
return ("%s -> %s[color=%s, arrowtype=normal];\n" % (n1, n2, colour_scheme["Vicious cycle"]["linecolor"]) +
"%s -> %s[color=%s, arrowtype=normal];\n" % (n2, n1, colour_scheme["Vicious cycle"]["linecolor"]))
if from_task_type in ("Final target", "Task to run",
"Up-to-date task forced to rerun",
"Explicitly specified task"):
return "%s -> %s[color=%s, arrowtype=normal];\n" % (n1, n2, colour_scheme["Task to run"]["linecolor"])
elif from_task_type in ("Up-to-date task", "Down stream","Up-to-date Final target"):
return "%s -> %s[color=%s, arrowtype=normal];\n" % (n1, n2, colour_scheme["Up-to-date"]["linecolor"])
#
# shouldn't be here!!
#
else:
return "%s -> %s[color=%s, arrowtype=normal];\n" % (n1, n2, colour_scheme["Up-to-date"]["linecolor"])
#_________________________________________________________________________________________
#
# get_default_colour_scheme
#_________________________________________________________________________________________
def get_default_colour_scheme(default_colour_scheme_index = 0):
"""
A selection of default colour schemes "inspired" by entries in
http://kuler.adobe.com/#create/fromacolor
"""
if default_colour_scheme_index ==0:
bluey_outline = '"#0044A0"'
bluey = '"#EBF3FF"'
greeny_outline = '"#006000"'
greeny = '"#B8CC6E"'
orangey = '"#EFA03B"'
orangey_outline= greeny_outline
ruddy = '"#FF3232"'
elif default_colour_scheme_index ==1:
bluey_outline = '"#000DDF"'
bluey = 'transparent'
greeny_outline = '"#4B8C2E"'
greeny = '"#9ED983"'
orangey = '"#D98100"'
orangey_outline= '"#D9D911"'
ruddy = '"#D93611"'
elif default_colour_scheme_index ==2:
bluey_outline = '"#4A64A5"'
bluey = 'transparent'
greeny_outline = '"#4A92A5"'
greeny = '"#99D1C1"'
orangey = '"#D2C24A"'
orangey_outline= greeny_outline
ruddy = '"#A54A64"'
elif default_colour_scheme_index ==3:
bluey_outline = '"#BFB5FF"'
bluey = 'transparent'
greeny_outline = '"#7D8A2E"'
greeny = '"#C9D787"'
orangey = '"#FFF1DC"'
orangey_outline= greeny_outline
ruddy = '"#FF3E68"'
elif default_colour_scheme_index ==4:
bluey_outline = '"#004460"'
bluey = 'transparent'
greeny_outline = '"#4B6000"'
greeny = '"#B8CC6E"'
orangey = '"#FFF0A3"'
orangey_outline= greeny_outline
ruddy = '"#F54F29"'
elif default_colour_scheme_index ==5:
bluey_outline = '"#1122FF"'
bluey = '"#AABBFF"'
greeny_outline = '"#007700"'
greeny = '"#44FF44"'
orangey = '"#EFA03B"'
orangey_outline= '"#FFCC3B"'
ruddy = '"#FF0000"'
elif default_colour_scheme_index ==6:
bluey_outline = '"#0044A0"'
bluey = '"#EBF3FF"'
greeny_outline = 'black'
greeny = '"#6cb924"'
orangey = '"#ece116"'
orangey_outline= greeny_outline
ruddy = '"#FF3232"'
else:
bluey_outline = '"#87BAE4"'
bluey = 'transparent'
greeny_outline = '"#87B379"'
greeny = '"#D3FAE3"'
orangey = '"#FDBA40"'
orangey_outline= greeny_outline
ruddy = '"#b9495e"'
default_colour_scheme = defaultdict(dict)
default_colour_scheme["Vicious cycle"]["linecolor"] = ruddy
default_colour_scheme["Pipeline"]["fontcolor"] = ruddy
default_colour_scheme["Key"]["fontcolor"] = "black"
default_colour_scheme["Key"]["fillcolor"]
|
hycis/TensorGraph
|
test/data_iterator_test.py
|
Python
|
apache-2.0
| 1,793
| 0.001673
|
import tensorgraph as tg
import numpy as np
import time
def test_SimpleBlocks():
X = np.random.rand(100, 200)
with open('X.npy', 'wb') as f:
np.save(f, X)
db = tg.SimpleBlocks(['X.npy']*10, batchsize=32, allow_preload=True)
t1 = time.time()
count = 1
for blk in db:
print(count)
count += 1
for batch in blk:
print(time.sleep(0.1))
pass
print('with preload time:', time.time() - t1)
db = tg.SimpleBlocks(['X.npy']*10, batchsize=32, allow_preload=False)
t1 = time.time()
    count = 1
for blk in db:
print(count)
count += 1
for batch in blk:
print(time.sleep(0.1))
pass
print('without preload time:', time.time() - t1)
db = tg.SimpleBlocks([('X.npy', 'X.npy'), ('X.npy', 'X.npy')], batchsize=32, allow_preload=False)
for blk in db:
print(blk)
for batch in blk:
print('len batch:', len(batch))
print('batch1 size:', batch[0].shape)
print('batch2 size:', batch[1].shape)
def test_DataBlocks():
X = np.random.rand(100, 200)
with open('X.npy', 'wb') as f:
np.save(f, X)
db = tg.DataBlocks(['X.npy']*10, batchsize=32, allow_preload=False)
for train_blk, valid_blk in db:
n_exp = 0
pbar = tg.ProgressBar(len(train_blk))
for batch in train_blk:
n_exp += len(batch[0])
time.sleep(0.05)
pbar.update(n_exp)
print()
pbar = tg.ProgressBar(len(valid_blk))
n_exp = 0
for batch in valid_blk:
n_exp += len(batch[0])
time.sleep(0.05)
pbar.update(n_exp)
print()
if __name__ == '__main__':
test_DataBlocks()
test_SimpleBlocks()
|
liquidinstruments/pymoku
|
pymoku/_iirfilterbox.py
|
Python
|
mit
| 32,876
| 0
|
import logging
import struct
from copy import deepcopy
from pymoku._oscilloscope import _CoreOscilloscope
from pymoku._instrument import to_reg_signed
from pymoku._instrument import from_reg_signed
from pymoku._instrument import to_reg_unsigned
from pymoku._instrument import from_reg_unsigned
from pymoku._instrument import to_reg_bool
from pymoku._instrument import from_reg_bool
from pymoku._instrument import ADC_SMP_RATE
from pymoku._instrument import needs_commit
from pymoku._instrument import ValueOutOfRangeException
from pymoku import _utils
from pymoku._dec_filter import DecFilter
log = logging.getLogger(__name__)
REG_ENABLE = 96
REG_MONSELECT = 111
REG_INPUTOFFSET_CH0 = 112
REG_INPUTOFFSET_CH1 = 113
REG_OUTPUTOFFSET_CH0 = 114
REG_OUTPUTOFFSET_CH1 = 115
REG_CH0_CH0GAIN = 116
REG_CH0_CH1GAIN = 117
REG_CH1_CH0GAIN = 118
REG_CH1_CH1GAIN = 119
REG_INPUTSCALE_CH0 = 120
REG_INPUTSCALE_CH1 = 121
REG_OUTPUTSCALE_CH0 = 122
REG_OUTPUTSCALE_CH1 = 123
REG_SAMPLINGFREQ = 124
REG_FILT_RESET = 62
# Monitor probe locations (for A and B channels)
_IIR_MON_NONE = 0
_IIR_MON_ADC1 = 1
_IIR_MON_IN1 = 2
_IIR_MON_OUT1 = 3
_IIR_MON_ADC2 = 4
_IIR_MON_IN2 = 5
_IIR_MON_OUT2 = 6
# Oscilloscope data sources
_IIR_SOURCE_A = 0
_IIR_SOURCE_B = 1
_IIR_SOURCE_IN1 = 2
_IIR_SOURCE_IN2 = 3
_IIR_SOURCE_EXT = 4
# Input mux selects for Oscilloscope
_IIR_OSC_SOURCES = {
'a': _IIR_SOURCE_A,
'b': _IIR_SOURCE_B,
'in1': _IIR_SOURCE_IN1,
'in2': _IIR_SOURCE_IN2,
'ext': _IIR_SOURCE_EXT
}
_IIR_COEFFWIDTH = 48
_IIR_INPUT_SMPS = ADC_SMP_RATE / 4
_IIR_CHN_BUFLEN = 2**13
_ADC_DEFAULT_CALIBRATION = 3750.0 # Bits/V (No attenuation)
class IIRFilterBox(_CoreOscilloscope):
"""
The IIR Filter Box implements infinite impulse response (IIR) filters
using 4 cascaded Direct Form 1 second-order stages with a final output
gain stage. The total transfer function can be written:
.. math::
        H(z) = G \prod_{k=1}^{4} s_k \frac{b_{0k} + b_{1k} z^{-1} + b_{2k} z^{-2}}{1 + a_{1k} z^{-1} + a_{2k} z^{-2}}
To specify a filter, you must supply an array containing the filter
coefficients. The array should contain five rows and six columns.
The first row has one column entry, corresponding to the overall gain
factor G. The following four rows have six entries each, corresponding
to the s, b0, b1, b2, a1 and a2 coefficients of the four cascaded SOS
filters.
Example array dimensions:
+----------+------+------+------+------+-------+
| G | | | | | |
+==========+======+======+======+======+=======+
| s1 | b0.1 | b1.1 | b2.1 | a1.1 | a2.1 |
+----------+------+------+------+------+-------+
| s2 | b0.2 | b1.2 | b2.2 | a1.2 | a2.2 |
+----------+------+------+------+------+-------+
| s3 | b0.3 | b1.3 | b2.3 | a1.3 | a2.3 |
        +----------+------+------+------+------+-------+
        | s4       | b0.4 | b1.4 | b2.4 | a1.4 | a2.4  |
+----------+------+------+------+------+-------+
Each 'a' coefficient must be a float in the range [-4.0, +4.0). 's'
coefficients are multiplied into each 'b' coefficient before being sent to
the device. These products (sN x b0.N, sN x b1.N, sN x b2.N) must also fall
in the range [-4.0, +4.0). Internally, the 'a' and 'b' coefficients are
represented as signed 48-bit fixed-point numbers, with 45 fractional bits.
The gain coefficient G must be a float in the range [-8e6, 8e6 - 2^-24].
It is represented internally as a signed 48-bit fixed-point with 24
fractional bits.
Filter coefficients can be computed using signal processing toolboxes in
e.g. MATLAB or SciPy.
.. note::
The overall output gain of the instrument is the product of the gain
of the filter, set in the coefficient matrix, and the output stage
gain set using :any:`set_gains_offsets`.
.. warning::
Some coefficients may result in overflow or underflow, which degrade
filter performance. Filter responses should be checked prior to use.
"""
def __init__(self):
"""Create a new IIR FilterBox instrument, ready to be attached to a
Moku."""
super(IIRFilterBox, self).__init__()
self._register_accessors(_iir_reg_handlers)
self.id = 6
self.type = "iirfilterbox"
# Monitor samplerate
self._input_samplerate = _IIR_INPUT_SMPS
self._chn_buffer_len = _IIR_CHN_BUFLEN
# Remembers monitor source choice
self.monitor_a = 'none'
self.monitor_b = 'none'
self._decfilter1 = DecFilter(self, 103)
self._decfilter2 = DecFilter(self, 107)
# Initialise all local configuration variables
# These remember user settings prior to on-commit reg calculations
self._matrixscale_ch1_ch1 = 0
self._matrixscale_ch1_ch2 = 0
self._matrixscale_ch2_ch1 = 0
self._matrixscale_ch2_ch2 = 0
self._input_scale1 = 0
self._output_scale1 = 0
self._input_offset1 = 0
self._output_offset1 = 0
self._input_scale2 = 0
self._output_scale2 = 0
self._input_offset2 = 0
self._output_offset2 = 0
# TODO: Read these back on_reg_sync
self.filter_ch1 = [[0, 0, 0, 0, 0, 0]] * 4
self.filter_ch2 = [[0, 0, 0, 0, 0, 0]] * 4
@needs_commit
def set_defaults(self):
""" Reset the IIR to sane defaults. """
super(IIRFilterBox, self).set_defaults()
# We only allow looking at the monitor signals in the embedded scope
self._set_source(1, _IIR_SOURCE_A)
self._set_source(2, _IIR_SOURCE_B)
# Default values
self.input_en1 = True
self.output_en1 = False
self.input_en2 = True
self.output_en2 = False
self.set_control_matrix(1, 1.0, 0.0)
self.set_control_matrix(2, 0.0, 1.0)
self.filter_reset = 0
# initialize filter coefficient arrays as all pass filters
b = [1.0, 1.0, 0.0, 0.0, 0.0, 0.0]
self.filter_ch1 = [b, b, b, b]
self.filter_ch2 = [b, b, b, b]
# do we want to set here?
self.set_frontend(1, fiftyr=True, atten=False, ac=False)
self.set_frontend(2, fiftyr=True, atten=False, ac=False)
# Default unity gain, zero offset, identity mixing matrix.
self.set_gains_offsets(1)
self.set_gains_offsets(2)
# Set default settings to plotting script values that have been tested
# thoroughly
self.set_monitor('a', 'in1')
self.set_monitor('b', 'in2')
self.set_trigger('a', 'rising', 0)
self._decfilter1.set_samplerate(8)
self._decfilter2.set_samplerate(8)
self.set_timebase(-1e-3, 1e-3)
@needs_commit
def set_control_matrix(self, ch, scale_in1, scale_in2):
"""
Configure the input control matrix specifying the input signal mixing
for the specified filter channel.
Input mixing allows a filter channel to act on a linear combination of
the two input signals.
:type ch: int, {1, 2}
:param ch: target filter channel
:type scale_in1: float, [-20, 20]
:param scale_in1: linear scale factor of input 1 signal added to
target filter channel input.
To avoid quantization, use at most one decimal place.
:type scale_in2: float, [-20, 20]
:param scale_in2: linear scale factor of input 2 signal added to
target filter channel input.
To avoid quantization, use at most one decimal place.
"""
_utils.check_parameter_valid('set',
ch,
[1, 2],
'filter channel')
_utils.check_parameter_valid('range',
scale_in1,
[-20, 20],
'control matrix scale (ch1)',
|
tuxology/bcc
|
examples/tracing/mallocstacks.py
|
Python
|
apache-2.0
| 1,942
| 0.000515
|
#!/usr/bin/python
#
# mallocstacks Trace malloc() calls in a process and print the full
# stack trace for all callsites.
# For Linux, uses BCC, eBPF. Embedded C.
#
# This script is a basic example of the new Linux 4.6+ BPF_STACK_TRACE
# table API.
#
# Copyright 2016 GitHub, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from __future__ import print_function
from bcc import BPF
from bcc.utils import printb
from time import sleep
import sys
if len(sys.argv) < 2:
print("USAGE: mallocstacks PID [NUM_STACKS=1024]")
exit()
pid = int(sys.argv[1])
if len(sys.argv) == 3:
try:
assert int(sys.argv[2]) > 0, ""
except (ValueError, AssertionError) as e:
print("USAGE: mallocstacks PID [NUM_STACKS=1024]")
print("NUM_STACKS must be a non-zero, positive integer")
exit()
stacks = sys.argv[2]
else:
stacks = "1024"
# load BPF program
b = BPF(text="""
#include <uapi/linux/ptrace.h>
BPF_HASH(calls, int);
BPF_STACK_TRACE(stack_traces, """ + stacks + """);
int alloc_enter(struct pt_regs *ctx, size_t size) {
int key = stack_traces.get_stackid(ctx, BPF_F_USER_STACK);
if (key < 0)
return 0;
// could also use `calls.increment(key, size);`
    u64 zero = 0, *val;
    val = calls.lookup_or_try_init(&key, &zero);
if (val) {
(*val) += size;
}
return 0;
};
""")
b.attach_uprobe(name="c", sym="malloc", fn_name="alloc_
|
enter", pid=pid)
print("Attaching to malloc in pid %d, Ctrl+C to quit." % pid)
# sleep until Ctrl-C
try:
sleep(99999999)
except KeyboardInterrupt:
pass
calls = b.get_table("calls")
stack_traces = b.get_table("stack_traces")
for k, v in reversed(sorted(calls.items(), key=lambda c: c[1].value)):
print("%d bytes allocated at:" % v.value)
if k.value > 0 :
for addr in stack_traces.walk(k.value):
printb(b"\t%s" % b.sym(addr, pid, show_offset=True))
|
teenvio/SOAP-API
|
examples/example_soap.py
|
Python
|
lgpl-3.0
| 1,984
| 0.015121
|
#!/usr/bin/env python
#
# @copyright Ipdea Land, S.L.
#
# LGPL v3 - GNU LESSER GENERAL PUBLIC LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU LESSER General Public License as published by
# the Free Software Foundation, either version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU LESSER General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from SOAPpy import WSDL
url= 'https://secure.teenvio.com/v4/public/api/soap/wsdl.xml'
server = WSDL.Proxy(url)
# change to 1 for debug
server.soapproxy.config.dumpSOAPOut = 0
server.soapproxy.config.dumpSOAPIn = 0
print
print "Check login:"
print "--------------"
# change this!!
server.loggin(usuario='user',plan='plan',password='pass') # change this!!
login_ok=server.checkLoggin()
if login_ok == False:
print "Login failed, check data!"
sys.exit(0)
print login_ok
print
print "Contacts for group 885:"
print "--------------"
server.loggin(usuario='user',plan='plan',password='pass') # change this!!
contactos=server.getGroupContacts(id_grupo=885)
for contacto in contactos :
print contacto
print
print "User data:"
print "--------------"
server.loggin(usuario='user',plan='plan',password='pass') # change this!!
user=server.getUserData()
user_dict = user[0]
for fila in user_dict:
value=fila['value']
if type(value) is int:
value=str(value)
print fila['key']+": "+value
print
print "group 885 data:"
print "--------------"
server.loggin(usuario='user',plan='plan',password='pass') # change this!!
group_data=server.getGroupData(id_grupo=885)
print "Name: "+group_data['Nombre']
print "Desc: "+group_data['Descripcion']
|