| repo_name (string, length 5-100) | path (string, length 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, length 0-8.16k) | middle (string, length 3-512) | suffix (string, length 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
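Read as a table, each row below holds one source file split into three text columns (prefix, middle, suffix) alongside its repository metadata, the layout typically used for fill-in-the-middle style data. As a minimal sketch of how such a row could be consumed, the snippet below reassembles the file text and builds an infilling example; the sample_row dict and the sentinel strings are illustrative assumptions, not part of this dump.

# Minimal sketch (assumptions: sample_row stands in for a real row; the FIM
# sentinel strings are illustrative, not defined by this dataset).
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def reassemble(row):
    # Concatenating the three text columns restores the original file contents.
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_example(row):
    # Prefix/suffix/middle ordering, as commonly used for infilling objectives.
    return (FIM_PREFIX + row["prefix"]
            + FIM_SUFFIX + row["suffix"]
            + FIM_MIDDLE + row["middle"])

sample_row = {"prefix": "def add(a, b):\n    return ", "middle": "a + b", "suffix": "\n"}
print(reassemble(sample_row))
print(to_fim_example(sample_row))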
| ybak/myblog | app/pngcanvas.py | Python | mit | 9,000 | 0.037889 |
#!/usr/bin/env python
"""Simple PNG Canvas for Python"""
__version__ = "0.8"
__author__ = "Rui Carmo (http://the.taoofmac.com)"
__copyright__ = "CC Attribution-NonCommercial-NoDerivs 2.0 Rui Carmo"
__contributors__ = ["http://collaboa.weed.rbse.com/repository/file/branches/pgsql/lib/spark_pr.rb"], ["Eli Bendersky"]
import zlib, struct
signature = struct.pack("8B", 137, 80, 78, 71, 13, 10, 26, 10)
# alpha blends two colors, using the alpha given by c2
def blend(c1, c2):
return [c1[i]*(0xFF-c2[3]) + c2[i]*c2[3] >> 8 for i in range(3)]
# calculate a new alpha given a 0-0xFF intensity
def intensity(c,i):
return [c[0],c[1],c[2],(c[3]*i) >> 8]
# calculate perceptive grayscale value
def grayscale(c):
return int(c[0]*0.3 + c[1]*0.59 + c[2]*0.11)
# calculate gradient colors
def gradientList(start,end,steps):
delta = [end[i] - start[i] for i in range(4)]
grad = []
for i in range(steps+1):
grad.append([start[j] + (delta[j]*i)/steps for j in range(4)])
return grad
class PNGCanvas:
def __init__(self, width, height,bgcolor=[0xff,0xff,0xff,0xff],color=[0,0,0,0xff]):
self.canvas = []
self.width = width
self.height = height
self.color = color #rgba
bgcolor = bgcolor[0:3] # we don't need alpha for background
for i in range(height):
self.canvas.append([bgcolor] * width)
def point(self,x,y,color=None):
if x<0 or y<0 or x>self.width-1 or y>self.height-1: return
if color == None: color = self.color
self.canvas[y][x] = blend(self.canvas[y][x],color)
def _rectHelper(self,x0,y0,x1,y1):
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
if x0 > x1: x0, x1 = x1, x0
if y0 > y1: y0, y1 = y1, y0
return [x0,y0,x1,y1]
def verticalGradient(self,x0,y0,x1,y1,start,end):
x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
grad = gradientList(start,end,y1-y0)
for x in range(x0, x1+1):
for y in range(y0, y1+1):
self.point(x,y,grad[y-y0])
def rectangle(self,x0,y0,x1,y1):
x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
self.polyline([[x0,y0],[x1,y0],[x1,y1],[x0,y1],[x0,y0]])
def filledRectangle(self,x0,y0,x1,y1):
x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
for x in range(x0, x1+1):
for y in range(y0, y1+1):
self.point(x,y,self.color)
def copyRect(self,x0,y0,x1,y1,dx,dy,destination):
x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
for x in range(x0, x1+1):
for y in range(y0, y1+1):
destination.canvas[dy+y-y0][dx+x-x0] = self.canvas[y][x]
def blendRect(self,x0,y0,x1,y1,dx,dy,destination,alpha=0xff):
x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
for x in range(x0, x1+1):
for y in range(y0, y1+1):
rgba = self.canvas[y][x] + [alpha]
destination.point(dx+x-x0,dy+y-y0,rgba)
# draw a line using Xiaolin Wu's antialiasing technique
def line(self,x0, y0, x1, y1):
# clean params
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
if y0>y1:
y0, y1, x0, x1 = y1, y0, x1, x0
dx = x1-x0
if dx < 0:
sx = -1
else:
sx = 1
dx *= sx
dy = y1-y0
# 'easy' cases
if dy == 0:
for x in range(x0,x1,sx):
self.point(x, y0)
return
if dx == 0:
for y in range(y0,y1):
self.point(x0, y)
self.point(x1, y1)
return
if dx == dy:
for x in range(x0,x1,sx):
self.point(x, y0)
y0 = y0 + 1
return
# main loop
self.point(x0, y0)
e_acc = 0
if dy > dx: # vertical displacement
e = (dx << 16) / dy
for i in range(y0,y1-1):
e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
if (e_acc <= e_acc_temp):
x0 = x0 + sx
w = 0xFF-(e_acc >> 8)
self.point(x0, y0, intensity(self.color,(w)))
y0 = y0 + 1
self.point(x0 + sx, y0, intensity(self.color,(0xFF-w)))
self.point(x1, y1)
return
# horizontal displacement
e = (dy << 16) / dx
for i in range(x0,x1-sx,sx):
e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
if (e_acc <= e_acc_temp):
y0 = y0 + 1
w = 0xFF-(e_acc >> 8)
self.point(x0, y0, intensity(self.color,(w)))
x0 = x0 + sx
self.point(x0, y0 + 1, intensity(self.color,(0xFF-w)))
self.point(x1, y1)
def polyline(self,arr):
for i in range(0,len(arr)-1):
self.line(arr[i][0],arr[i][1],arr[i+1][0], arr[i+1][1])
def dump(self):
raw_list = []
for y in range(self.height):
raw_list.append(chr(0)) # filter type 0 (None)
for x in range(self.width):
raw_list.append(struct.pack("!3B",*self.canvas[y][x]))
raw_data = ''.join(raw_list)
# 8-bit image represented as RGB tuples
# simple transparency, alpha is pure white
return signature + \
self.pack_chunk('IHDR', struct.pack("!2I5B",self.width,self.height,8,2,0,0,0)) + \
self.pack_chunk('tRNS', struct.pack("!6B",0xFF,0xFF,0xFF,0xFF,0xFF,0xFF)) + \
self.pack_chunk('IDAT', zlib.compress(raw_data,9)) + \
self.pack_chunk('IEND', '')
def pack_chunk(self,tag,data):
to_check = tag + data
return struct.pack("!I",len(data)) + to_check + struct.pack("!I", zlib.crc32(to_check) & 0xFFFFFFFF)
def load(self,f):
assert f.read(8) == signature
self.canvas=[]
for tag, data in self.chunks(f):
if tag == "IHDR":
( width,
height,
bitdepth,
colortype,
compression, filter, interlace ) = struct.unpack("!2I5B",data)
self.width = width
self.height = height
if (bitdepth,colortype,compression, filter, interlace) != (8,2,0,0,0):
raise TypeError('Unsupported PNG format')
# we ignore tRNS because we use pure white as alpha anyway
elif tag == 'IDAT':
raw_data = zlib.decompress(data)
rows = []
i = 0
for y in range(height):
filtertype = ord(raw_data[i])
i = i + 1
cur = [ord(x) for x in raw_data[i:i+width*3]]
if y == 0:
rgb = self.defilter(cur,None,filtertype)
else:
rgb = self.defilter(cur,prev,filtertype)
prev = cur
i = i+width*3
row = []
j = 0
for x in range(width):
pixel = rgb[j:j+3]
row.append(pixel)
j = j + 3
self.canvas.append(row)
def defilter(self,cur,prev,filtertype,bpp=3):
if filtertype == 0: # No filter
return cur
elif filtertype == 1: # Sub
xp = 0
for xc in range(bpp,len(cur)):
cur[xc] = (cur[xc] + cur[xp]) % 256
xp = xp + 1
elif filtertype == 2: # Up
for xc in range(len(cur)):
cur[xc] = (cur[xc] + prev[xc]) % 256
elif filtertype == 3: # Average
xp = 0
for xc in range(len(cur)):
cur[xc] = (cur[xc] + (cur[xp] + prev[xc])/2) % 256
xp = xp + 1
elif filtertype == 4: # Paeth
xp = 0
for i in range(bpp):
cur[i] = (cur[i] + prev[i]) % 256
for xc in range(bpp,len(cur)):
a = cur[xp]
b = prev[xc]
c = prev[xp]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
value = a
elif pb <= pc:
value = b
else:
value = c
cur[xc] = (cur[xc] + value) % 256
xp = xp + 1
else:
raise TypeError('Unrecognized scanline filter type')
return cur
def chunks(self,f):
while 1:
try:
length = struct.unpack("!I",f.read(4))[0]
tag = f.read(4)
data = f.read(length)
crc = struct.unpack("!i",f.read(4))[0]
except:
return
if zlib.crc32(tag + data) != crc:
raise IOError
yield [tag,data]
if __name__ == '__main__':
width = 128
height = 64
print "Creating Canvas..."
c = PNGCanvas(width,height)
c.color = [0xff,0,0,0xff]
c.re
|
| sunchuanleihit/vimrc | sources_non_forked/YouCompleteMe/install.py | Python | mit | 1,500 | 0.040667 |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import subprocess
import sys
import os.path as p
import glob
PY_MAJOR, PY_MINOR = sys.version_info[ 0 : 2 ]
if not ( ( PY_MAJOR == 2 and PY_MINOR >= 6 ) or
( PY_MAJOR == 3 and PY_MINOR >= 3 ) or
PY_MAJOR > 3 ):
sys.exit( 'YouCompleteMe requires Python >= 2.6 or >= 3.3; '
'your version of Python is ' + sys.version )
DIR_OF_THIS_SCRIPT = p.dirname( p.abspath( __file__ ) )
DIR_OF_OLD_LIBS = p.join( DIR_OF_THIS_SCRIPT, 'python' )
def CheckCall( args, **kwargs ):
try:
subprocess.check_call( args, **kwargs )
except subprocess.CalledProcessError as error:
sys.exit( error.returncode )
def Main():
build_file = p.join( DIR_OF_THIS_SCRIPT, 'third_party', 'ycmd', 'build.py' )
if not p.isfile( build_file ):
sys.exit(
'File {0} does not exist; you probably forgot to run:\n'
'\tgit submodule update --init --recursive\n'.format( build_file ) )
CheckCall( [ sys.executable, build_file ] + sys.argv[ 1: ] )
# Remove old YCM libs if present so that YCM can start.
old_libs = (
glob.glob( p.join( DIR_OF_OLD_LIBS, '*ycm_core.*' ) ) +
glob.glob( p.join( DIR_OF_OLD_LIBS, '*ycm_client_support.*' ) ) +
glob.glob( p.join( DIR_OF_OLD_LIBS, '*clang*.*') ) )
for lib in old_libs:
os.remove( lib )
if __name__ == "__main__":
Main()
|
| valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/dctp.py | Python | gpl-3.0 | 3,148 | 0.02987 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
int_or_none,
unified_timestamp,
url_or_none,
)
class DctpTvIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dctp\.tv/(?:#/)?filme/(?P<id>[^/?#&]+)'
_TESTS = [{
# 4x3
'url': 'http://www.dctp.tv/filme/videoinstallation-fuer-eine-kaufhausfassade/',
'info_dict': {
'id': '95eaa4f33dad413aa17b4ee613cccc6c',
'display_id': 'videoinstallation-fuer-eine-kaufhausfassade',
'ext': 'flv',
'title': 'Videoinstallation für eine Kaufhausfassade',
'description': 'Kurzfilm',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 71.24,
'timestamp': 1302172322,
'upload_date': '20110407',
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
# 16x9
'url': 'http://www.dctp.tv/filme/sind-youtuber-die-besseren-lehrer/',
'only_matching': True,
}]
_BASE_URL = 'http://dctp-ivms2-restapi.s3.amazonaws.com'
def _real_extract(self, url):
display_id = self._match_id(url)
version = self._download_json(
'%s/version.json' % self._BASE_URL, display_id,
'Downloading version JSON')
restapi_base = '%s/%s/restapi' % (
self._BASE_URL, version['version_name'])
info = self._download_json(
'%s/slugs/%s.json' % (restapi_base, display_id), display_id,
'Downloading video info JSON')
media = self._download_json(
'%s/media/%s.json' % (restapi_base, compat_str(info['object_id'])),
display_id, 'Downloading media JSON')
uuid = media['uuid']
title = media['title']
ratio = '16x9' if media.get('is_wide') else '4x3'
play_path = 'mp4:%s_dctp_0500_%s.m4v' % (uuid, ratio)
servers = self._download_json(
'http://www.dctp.tv/streaming_servers/', display_id,
note='Downloading server list JSON', fatal=False)
if servers:
endpoint = next(
server['endpoint']
for server in servers
if url_or_none(server.get('endpoint')) and
'cloudfront' in server['endpoint'])
else:
endpoint = 'rtmpe://s2pqqn4u96e4j8.cloudfront.net/cfx/st/'
app = self._search_regex(
r'^rtmpe?://[^/]+/(?P<app>.*)$', endpoint, 'app')
formats = [{
'url': endpoint,
'app': app,
'play_path': play_path,
'page_url': url,
'player_url': 'http://svm-prod-dctptv-static.s3.amazonaws.com/dctptv-relaunch2012-110.swf',
'ext': 'flv',
}]
thumbnails = []
images = media.get('images')
if isinstance(images, list):
for image in images:
if not isinstance(image, dict):
continue
image_url = url_or_none(image.get('url'))
if not image_url:
continue
thumbnails.append({
'url': image_url,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
})
return {
'id': uuid,
'display_id': display_id,
'title': title,
'alt_title': media.get('subtitle'),
'description': media.get('description') or media.get('teaser'),
'timestamp': unified_timestamp(media.get('created')),
'duration': float_or_none(media.get('duration_in_ms'), scale=1000),
'thumbnails': thumbnails,
'formats': formats,
}
|
| aronsky/home-assistant | homeassistant/components/nut/config_flow.py | Python | apache-2.0 | 7,989 | 0.000876 |
"""Config flow for Network UPS Tools (NUT) integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_ALIAS,
CONF_BASE,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_RESOURCES,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import PyNUTData
from .const import (
DEFAULT_HOST,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
KEY_STATUS,
KEY_STATUS_DISPLAY,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
def _base_schema(discovery_info):
"""Generate base schema."""
base_schema = {}
if not discovery_info:
base_schema.update(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): str,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
}
)
base_schema.update(
{vol.Optional(CONF_USERNAME): str, vol.Optional(CONF_PASSWORD): str}
)
return vol.Schema(base_schema)
def _resource_schema_base(available_resources, selected_resources):
"""Resource selection schema."""
known_available_resources = {
sensor_id: sensor_desc.name
for sensor_id, sensor_desc in SENSOR_TYPES.items()
if sensor_id in available_resources
}
if KEY_STATUS in known_available_resources:
known_available_resources[KEY_STATUS_DISPLAY] = SENSOR_TYPES[
KEY_STATUS_DISPLAY
].name
return {
vol.Required(CONF_RESOURCES, default=selected_resources): cv.multi_select(
known_available_resources
)
}
def _ups_schema(ups_list):
"""UPS selection schema."""
return vol.Schema({vol.Required(CONF_ALIAS): vol.In(ups_list)})
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from _base_schema with values provided by the user.
"""
host = data[CONF_HOST]
port = data[CONF_PORT]
alias = data.get(CONF_ALIAS)
username = data.get(CONF_USERNAME)
password = data.get(CONF_PASSWORD)
data = PyNUTData(host, port, alias, username, password)
await hass.async_add_executor_job(data.update)
if not (status := data.status):
raise CannotConnect
return {"ups_list": data.ups_list, "available_resources": status}
def _format_host_port_alias(user_input):
"""Format a host, port, and alias so it can be used for comparison or display."""
host = user_input[CONF_HOST]
port = user_input[CONF_PORT]
alias = user_input.get(CONF_ALIAS)
if alias:
return f"{alias}@{host}:{port}"
return f"{host}:{port}"
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Network UPS Tools (NUT)."""
VERSION = 1
def __init__(self):
"""Initialize the nut config flow."""
self.nut_config = {}
self.available_resources = {}
self.discovery_info = {}
self.ups_list = None
self.title = None
async def async_step_zeroconf(self, discovery_info):
"""Prepare configuration for a discovered nut device."""
self.discovery_info = discovery_info
await self._async_handle_discovery_without_unique_id()
self.context["title_placeholders"] = {
CONF_PORT: discovery_info.get(CONF_PORT, DEFAULT_PORT),
CONF_HOST: discovery_info[CONF_HOST],
}
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Handle the user input."""
errors = {}
if user_input is not None:
if self.discovery_info:
user_input.update(
{
CONF_HOST: self.discovery_info[CONF_HOST],
CONF_PORT: self.discovery_info.get(CONF_PORT, DEFAULT_PORT),
}
)
info, errors = await self._async_validate_or_error(user_input)
if not errors:
self.nut_config.update(user_input)
if len(info["ups_list"]) > 1:
self.ups_list = info["ups_list"]
return await self.async_step_ups()
if self._host_port_alias_already_configured(self.nut_config):
return self.async_abort(reason="already_configured")
self.available_resources.update(info["available_resources"])
return await self.async_step_resources()
return self.async_show_form(
step_id="user", data_schema=_base_schema(self.discovery_info), errors=errors
)
async def async_step_ups(self, user_input=None):
"""Handle the picking the ups."""
errors = {}
if user_input is not None:
self.nut_config.update(user_input)
if self._host_port_alias_already_configured(self.nut_config):
return self.async_abort(reason="already_configured")
info, errors = await self._async_validate_or_error(self.nut_config)
if not errors:
self.available_resources.update(info["available_resources"])
return await self.async_step_resources()
return self.async_show_form(
step_id="ups",
data_schema=_ups_schema(self.ups_list),
errors=errors,
)
async def async_step_resources(self, user_input=None):
"""Handle the picking the resources."""
if user_input is None:
return self.async_show_form(
step_id="resources",
data_schema=vol.Schema(
_resource_schema_base(self.available_resources, [])
),
)
self.nut_config.update(user_input)
title = _format_host_port_alias(self.nut_config)
return self.async_create_entry(title=title, data=self.nut_config)
def _host_port_alias_already_configured(self, user_input):
"""See if we already have a nut entry matching user input configured."""
existing_host_port_aliases = {
_format_host_port_alias(entry.data)
for entry in self._async_current_entries()
if CONF_HOST in entry.data
}
return _format_host_port_alias(user_input) in existing_host_port_aliases
async def _async_validate_or_error(self, config):
errors = {}
info = {}
try:
info = await validate_input(self.hass, config)
except CannotConnect:
errors[CONF_BASE] = "cannot_connect"
except Exception:  # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors[CONF_BASE] = "unknown"
return info, errors
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for nut."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
scan_interval = self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
)
base_schema = {
vol.Optional(CONF_SCAN_INTERVAL, default=scan_interval): vol.All(
vol.Coerce(int), vol.Clamp(min=10, max=300)
)
}
return self.async_show_form(step_id="init", data_schema=vol.Schema(base_schema))
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
| lyw07/kolibri | kolibri/core/auth/test/test_roles_and_membership.py | Python | mit | 10,992 | 0.001638 |
"""
Tests of role and membership calculations.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.test import TestCase
from ..constants import role_kinds
from ..models import Classroom
from ..models import Facility
from ..models import FacilityUser
from ..models import KolibriAnonymousUser
from ..models import LearnerGroup
from .helpers import create_dummy_facility_data
from .helpers import create_superuser
def flatten(lst):
if lst == []:
return lst
if isinstance(lst[0], list):
return flatten(lst[0]) + flatten(lst[1:])
return lst[:1] + flatten(lst[1:])
class RolesWithinFacilityTestCase(TestCase):
def setUp(self):
self.data = create_dummy_facility_data()
def test_admin_has_admin_role_for_own_facility(self):
admin = self.data["facility_admin"]
facility = self.data["facility"]
self.assertTrue(admin.has_role_for(role_kinds.ADMIN, facility))
self.assertIn(role_kinds.ADMIN, admin.get_roles_for(facility))
def test_coach_has_coach_role_for_own_classroom(self):
coach0 = self.data["classroom_coaches"][0]
classroom0 = self.data["classrooms"][0]
self.assertTrue(coach0.has_role_for(role_kinds.COACH, classroom0))
self.assertIn(role_kinds.COACH, coach0.get_roles_for(classroom0))
def test_coach_has_no_coach_role_for_other_classroom(self):
coach0 = self.data["classroom_coaches"][0]
classroom1 = self.data["classrooms"][1]
self.assertFalse(coach0.has_role_for(role_kinds.COACH, classroom1))
self.assertNotIn(role_kinds.COACH, coach0.get_roles_for(classroom1))
def test_coach_has_coach_role_for_learner_from_own_classroom(self):
coach0 = self.data["classroom_coaches"][0]
learner0 = self.data["learners_one_group"][0][0]
self.assertTrue(coach0.has_role_for(role_kinds.COACH, learner0))
self.assertIn(role_kinds.COACH, coach0.get_roles_for(learner0))
def test_coach_has_no_coach_role_for_learner_from_other_classroom(self):
coach0 = self.data["classroom_coaches"][0]
learner1 = self.data["learners_one_group"][1][0]
self.assertFalse(coach0.has_role_for(role_kinds.COACH, learner1))
self.assertNotIn(role_kinds.COACH, coach0.get_roles_for(learner1))
class ImplicitMembershipTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create(name="My Facility")
self.admin = FacilityUser.objects.create(
username="admin", facility=self.facility
)
self.facility.add_admin(self.admin)
self.learner = FacilityUser.objects.create(
username="learner", facility=self.facility
)
def test_has_admin_role_for_learner(self):
self.assertTrue(self.admin.has_role_for(role_kinds.ADMIN, self.learner))
def test_only_has_admin_role_for_learner(self):
self.assertEqual(
self.admin.get_roles_for(self.learner), set([role_kinds.ADMIN])
)
def test_admin_can_read_learner_object(self):
self.assertTrue(self.admin.can_read(self.learner))
def test_learner_is_in_list_of_readable_objects(self):
self.assertIn(
self.learner, self.admin.filter_readable(FacilityUser.objects.all())
)
class ExplicitMembershipTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create(name="My Facility")
self.admin = FacilityUser.objects.create(
username="admin", facility=self.facility
)
self.classroom = Classroom.objects.create(name="Class", parent=self.facility)
self.classroom.add_admin(self.admin)
self.learner = FacilityUser.objects.create(
username="learner", facility=self.facility
)
self.group = LearnerGroup.objects.create(name="Group", parent=self.classroom)
self.group.add_member(self.learner)
def test_has_admin_role_for_learner(self):
self.assertTrue(self.admin.has_role_for(role_kinds.ADMIN, self.learner))
def test_only_has_admin_role_for_learner(self):
self.assertEqual(
self.admin.get_roles_for(self.learner), set([role_kinds.ADMIN])
)
def test_admin_can_read_learner_object(self):
self.assertTrue(self.admin.can_read(self.learner))
def test_learner_is_in_list_of_readable_objects(self):
self.assertIn(
self.learner, self.admin.filter_readable(FacilityUser.objects.all())
)
class RolesAcrossFacilitiesTestCase(TestCase):
def setUp(self):
self.data1 = create_dummy_facility_data()
self.data2 = create_dummy_facility_data()
def test_no_roles_between_users_across_facilities(self):
users1 = self.data1["all_users"]
users2 = self.data2["all_users"]
for user1 in users1:
for user2 in users2:
if not user1.is_superuser:
self.assertEqual(len(user1.get_roles_for(user2)), 0)
def test_no_roles_for_collections_across_facilities(self):
users1 = (
self.data1["classroom_coaches"]
+ [self.data1["facility_admin"]]
+ list(self.data1["facility"].get_members())
)
collections2 = (
[self.data2["facility"]]
+ self.data2["classrooms"]
+ flatten(self.data2["learnergroups"])
)
for user1 in users1:
for collection2 in collections2:
if not user1.is_superuser:
self.assertEqual(len(user1.get_roles_for(collection2)), 0)
class MembershipWithinFacilityTestCase(TestCase):
def setUp(self):
self.data = create_dummy_facility_data()
self.anon_user = KolibriAnonymousUser()
def test_facility_membership(self):
actual_members = flatten(
self.data["learners_one_group"]
+ [self.data["learner_all_groups"]]
+ self.data["unattached_users"]
+ [self.data["facility_admin"]]
+ [self.data["facility_coach"]]
+ self.data["classroom_admins"]
+ self.data["classroom_coaches"]
+ [self.data["superuser"]]
)
returned_members = self.data["facility"].get_members()
self.assertSetEqual(set(actual_members), set(returned_members))
for user in actual_members:
self.assertTrue(user.is_member_of(self.data["facility"]))
self.assertFalse(self.anon_user.is_member_of(self.data["facility"]))
def test_classroom_membership(self):
for i, classroom in enumerate(self.data["classrooms"]):
actual_members = flatten(
self.data["learners_one_group"][i] + [self.data["learner_all_groups"]]
)
returned_members = classroom.get_members()
self.assertSetEqual(set(actual_members), set(returned_members))
# ensure that `is_member` is True for all users in the classroom
for user in actual_members:
self.assertTrue(user.is_member_of(classroom))
# ensure that `is_member` is False for all users not in the classroom
for user in set(self.data["all_users"]) - set(actual_members):
self.assertFalse(user.is_member_of(classroom))
self.assertFalse(self.anon_user.is_member_of(classroom))
def test_learnergroup_membership(self):
for i, classroom_users in enumerate(self.data["learners_one_group"]):
for j, learnergroup_users in enumerate(classroom_users):
learnergroup = self.data["learnergroups"][i][j]
actual_members = [self.data["learners_one_group"][i][j]] + [
self.data["learner_all_groups"]
]
returned_members = learnergroup.get_members()
self.assertSetEqual(set(actual_members), set(returned_members))
# ensure that `is_member` is True for all users in the learnergroup
for user in actual_members:
self.assertTrue(user.is_member_of(learnergroup))
# ensur
|
| ep1cman/RFLED-Server | source/admin.py | Python | gpl-3.0 | 1,061 | 0.01131 |
#!/usr/bin/env python
import socket
# Set admin server settings
UDP_IP = '' # Leave empty for Broadcast support
ADMIN_PORT = 48899
# Local settings of your Raspberry Pi, used for app discovery
INT_IP = '10.0.1.61'
INT_MAC = '111a02bf232b'
# Code Starts Here #
# Create UDP socket, bind to it
adminsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
adminsock.bind((UDP_IP, ADMIN_PORT))
# Loop forever
while True:
admindata, adminaddr = adminsock.recvfrom(64) # buffer size is 64 bytes
# Did we get a message?
if admindata is not None:
# print("admin command: ", str(admindata)) # Debugging
# If the client app is syncing to a unit
if str(admindata).find("Link_Wi-Fi") != -1:
RETURN = INT_IP + ',' + INT_MAC + ',' # Return our IP/MAC
# print("admin return: ", RETURN) # Debugging
adminsock.sendto(bytes(RETURN, "utf-8"),adminaddr) # Send Response
else:
adminsock.sendto(bytes('+ok', "utf-8"),adminaddr) # Send OK for each packet we get
else:
break
|
| oskarm91/sis | apps/users/migrations/0005_parentrelation_signature.py | Python | bsd-3-clause | 488 | 0.002049 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20150428_2142'),
]
operations = [
migrations.AddField(
model_name='parentrelation',
name='signature',
field=models.CharField(max_length=255, null=True, verbose_name='sig', blank=True),
preserve_default=True,
),
]
|
| xrg/openerp-server | bin/addons/base/res/res_config.py | Python | agpl-3.0 | 18,280 | 0.002899 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
from osv import osv, fields
from tools.translate import _
from tools import ustr
import pooler
class res_config_configurable(osv.osv_memory):
''' Base classes for new-style configuration items
Configuration items should inherit from this class, implement
the execute method (and optionally the cancel one) and have
their view inherit from the related res_config_view_base view.
'''
_name = 'res.config'
_inherit = 'ir.wizard.screen'
logger = logging.getLogger('res_config.actions')
__logger = logging.getLogger(_name)
def get_current_progress(self, cr, uid, context=None):
'''Return a description the current progress of configuration:
a tuple of (non_open_todos:int, total_todos: int)
'''
return (self.pool.get('ir.actions.todo')\
.search_count(cr, uid, [('state','<>','open')], context),
self.pool.get('ir.actions.todo')\
.search_count(cr, uid, [], context))
def _progress(self, cr, uid, context=None):
closed, total = self.get_current_progress(cr, uid, context=context)
if total:
return round(closed*100./total)
return 100.
_columns = dict(
progress = fields.float('Configuration Progress', readonly=True),
)
_defaults = dict(
progress = _progress,
)
def _next_action(self, cr, uid, context=None):
todos = self.pool.get('ir.actions.todo')
active_todos = todos.search(cr, uid, [('state','=','open')],
limit=1)
if active_todos:
todo_obj = todos.browse(cr, uid, active_todos[0], context=None)
todo_groups = map(lambda x:x.id, todo_obj.groups_id)
dont_skip_todo = True
if todo_groups:
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid=ANY(%s)",(uid, todo_groups,))
dont_skip_todo = bool(cr.fetchone())
if dont_skip_todo:
return todos.browse(cr, uid, active_todos[0], context=None)
else:
todos.write(cr, uid, active_todos[0], {'state':'skip'}, context=None)
return self._next_action(cr, uid)
return None
def _set_previous_todo(self, cr, uid, state, context=None):
""" lookup the previous (which is still the next at this point)
ir.actions.todo, set it to whatever state was provided.
"""
# this is ultra brittle, but apart from storing the todo id
# into the res.config view, I'm not sure how to get the
# "previous" todo
if context is None:
context = {}
if context.get('active_action_todo'):
previous_todo = self.pool.get('ir.actions.todo').browse(cr, uid, context['active_action_todo'], context=context)
else:
previous_todo = self._next_action(cr, uid, context=context)
if not previous_todo:
self.__logger.warn(_("Couldn't find previous ir.actions.todo"))
return
previous_todo.write({'state':state})
def _next(self, cr, uid, context=None):
next = self._next_action(cr, uid)
if next:
action = next.action_id
return {
'view_mode': action.view_mode,
'view_type': action.view_type,
'view_id': action.view_id and [action.view_id.id] or False,
'res_model': action.res_model,
'type': action.type,
'target': action.target,
'context': {'active_action_todo': next.id},
}
self.logger.info('All configuration actions have been executed.')
current_user_menu = self.pool.get('res.users')\
.browse(cr, uid, uid).menu_id
# return the action associated with the menu
return self.pool.get(current_user_menu.type)\
.read(cr, uid, current_user_menu.id)
def start(self, cr, uid, ids, context=None):
ids2 = self.pool.get('ir.actions.todo').search(cr, uid, [], context=context)
for todo in self.pool.get('ir.actions.todo').browse(cr, uid, ids2, context=context):
if (todo.restart=='always') or (todo.restart=='onskip' and (todo.state in ('skip','cancel'))):
todo.write({'state':'open'})
return self.next(cr, uid, ids, context)
def next(self, cr, uid, ids, context=None):
""" Returns the next todo action to execute (using the default
sort order)
"""
return self._next(cr, uid, context=context)
def execute(self, cr, uid, ids, context=None):
""" Method called when the user clicks on the ``Next`` button.
Execute *must* be overloaded unless ``action_next`` is overloaded
(which is something you generally don't need to do).
If ``execute`` returns an action dictionary, that action is executed
rather than just going to the next configuration item.
"""
raise NotImplementedError(
'Configuration items need to implement execute')
def cancel(self, cr, uid, ids, context=None):
""" Method called when the user click on the ``Skip`` button.
``cancel`` should be overloaded instead of ``action_skip``. As with
``execute``, if it returns an action dictionary that action is
executed instead of the default (going to the next configuration item)
The default implementation is a NOOP.
``cancel`` is also called by the default implementation of
``action_cancel``.
"""
pass
def action_next(self, cr, uid, ids, context=None):
""" Action handler for the ``next`` event.
Sets the status of the todo the event was sent from to
``done``, calls ``execute`` and -- unless ``execute`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
self._set_previous_todo(cr, uid, state='done', context=context)
next = self.execute(cr, uid, ids, context=None)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_skip(self, cr, uid, ids, context=None):
""" Action handler for the ``skip`` event.
Sets the status of the todo the event was sent from to
``skip``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
self._set_previous_todo(cr, uid, state='skip', context=context)
next = self.cancel(cr, uid, ids, context=None)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Action handler for the ``cancel`` event. That event isn't
generated by the res.config.view.base inheritable view, the
inherited view has to overload one of the buttons (or add one
more).
Sets the status of the todo the event was sent from to
``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
an action
|
| Freso/listenbrainz-server | listenbrainz_spark/utils/tests/test_init.py | Python | gpl-2.0 | 5,143 | 0.001556 |
import os
import tempfile
from datetime import datetime
from listenbrainz_spark.tests import SparkTestCase
from listenbrainz_spark import utils, path, config
from pyspark.sql import Row
class UtilsTestCase(SparkTestCase):
# use path_ as prefix for all paths in this class.
path_ = "/test"
temp_path_ = "/temp"
def tearDown(self):
if utils.path_exists(self.path_):
utils.delete_dir(self.path_, recursive=True)
if utils.path_exists(self.temp_path_):
utils.delete_dir(self.temp_path_, recursive=True)
def test_append_dataframe(self):
hdfs_path = self.path_ + '/test_df.parquet'
df = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
utils.append(df, hdfs_path)
new_df = utils.read_files_from_HDFS(hdfs_path)
self.assertEqual(new_df.count(), 1)
df = utils.create_dataframe([Row(column1=3, column2=4)], schema=None)
utils.append(df, hdfs_path)
appended_df = utils.read_files_from_HDFS(hdfs_path)
self.assertEqual(appended_df.count(), 2)
def test_create_dataframe(self):
hdfs_path = self.path_ + '/test_df.parquet'
df = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
self.assertEqual(df.count(), 1)
utils.save_parquet(df, hdfs_path)
received_df = utils.read_files_from_HDFS(hdfs_path)
self.assertEqual(received_df.count(), 1)
def test_create_dir(self):
utils.create_dir(self.path_)
status = utils.path_exists(self.path_)
self.assertTrue(status)
def test_delete_dir(self):
utils.create_dir(self.path_)
utils.delete_dir(self.path_)
status = utils.path_exists(self.path_)
self.assertFalse(status)
def test_get_listens(self):
from_date = datetime(2019, 10, 1)
to_date = datetime(2019, 11, 1)
df = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
dest_path = self.path_ + '/{}/{}.parquet'.format(from_date.year, from_date.month)
utils.save_parquet(df, dest_path)
df = utils.create_dataframe([Row(column1=3, column2=4)], schema=None)
dest_path = self.path_ + '/{}/{}.parquet'.format(to_date.year, to_date.month)
utils.save_parquet(df, dest_path)
received_df = utils.get_listens(from_date, to_date, self.path_)
self.assertEqual(received_df.count(), 2)
def test_path_exists(self):
utils.create_dir(self.path_)
status = utils.path_exists(self.path_)
self.assertTrue(status)
def test_save_parquet(self):
df = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
utils.save_parquet(df, self.path_)
received_df = utils.read_files_from_HDFS(self.path_)
self.assertEqual(received_df.count(), 1)
def test_upload_to_HDFS(self):
temp_file = tempfile.mkdtemp()
local_path = os.path.join(temp_file, 'test_file.txt')
with open(local_path, 'w') as f:
f.write('test file')
self.path_ = '/test/upload.parquet'
utils.upload_to_HDFS(self.path_, local_path)
status = utils.path_exists(self.path_)
self.assertTrue(status)
def test_rename(self):
utils.create_dir(self.path_)
test_exists = utils.path_exists(self.path_)
self.assertTrue(test_exists)
utils.rename(self.path_, self.temp_path_)
test_exists = utils.path_exists(self.path_)
self.assertFalse(test_exists)
temp_exists = utils.path_exists(self.temp_path_)
self.assertTrue(temp_exists)
utils.delete_dir(self.temp_path_)
def test_copy(self):
# Test directories
utils.create_dir(self.path_)
utils.create_dir(os.path.join(self.path_, "a"))
utils.create_dir(os.path.join(self.path_, "b"))
# DataFrames to create parquets
df_a = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
df_b = utils.create_dataframe([Row(column1=3, column2=4)], schema=None)
df_c = utils.create_dataframe([Row(column1=5, column2=6)], schema=None)
# Save DataFrames in respective directories
utils.save_parquet(df_a, os.path.join(self.path_, "a", "df_a.parquet"))
utils.save_parquet(df_b, os.path.join(self.path_, "b", "df_b.parquet"))
utils.save_parquet(df_c, os.path.join(self.path_, "df_c.parquet"))
utils.copy(self.path_, self.temp_path_, overwrite=True)
# Read copied DataFrame
cp_df_a = utils.read_files_from_HDFS(os.path.join(self.temp_path_, "a", "df_a.parquet"))
cp_df_b = utils.read_files_from_HDFS(os.path.join(self.temp_path_, "b", "df_b.parquet"))
cp_df_c = utils.read_files_from_HDFS(os.path.join(self.temp_path_, "df_c.parquet"))
# Check if both DataFrames are same
self.assertListEqual(df_a.rdd.map(list).collect(), cp_df_a.rdd.map(list).collect())
self.assertListEqual(df_b.rdd.map(list).collect(), cp_df_b.rdd.map(list).collect())
self.assertListEqual(df_c.rdd.map(list).collect(), cp_df_c.rdd.map(list).collect())
|
| hsoft/musicguru | qt/fs_model.py | Python | bsd-3-clause | 7,759 | 0.006702 |
# Created By: Virgil Dupras
# Created On: 2009-09-19
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from PyQt4.QtCore import Qt, SIGNAL, QMimeData, QByteArray
from PyQt4.QtGui import QPixmap
from hscommon.conflict import is_conflicted
from hscommon.util import dedupe, format_size, format_time
from hscommon.path import Path
from qtlib.tree_model import TreeNode, TreeModel
from core.fs_utils import smart_move
MIME_PATHS = 'application/musicguru.paths'
DESIGN_BOARD_NAME = '<design board>'
IGNORE_BOX_NAME = '<ignore box>'
class FSNode(TreeNode):
def __init__(self, model, parent, ref, row):
TreeNode.__init__(self, model, parent, row)
self.ref = ref
self._data = None
self._imageName = None
def __repr__(self):
return "<FSNode %s>" % self.ref.name
def _getData(self):
raise NotImplementedError()
def _getImageName(self):
raise NotImplementedError()
def invalidate(self, with_subnodes=False):
if with_subnodes:
for node in self.subnodes:
node.invalidate(with_subnodes=True)
self._data = None
self._imageName = None
TreeNode.invalidate(self)
@property
def data(self):
if self._data is None:
self._data = self._getData()
return self._data
@property
def imageName(self):
if self._imageName is None:
self._imageName = self._getImageName()
return self._imageName
class SongNode(FSNode):
def _getData(self):
song = self.ref
return [
song.name,
song.original.parent_volume.name,
0,
format_size(song.size, 2, 2, False),
format_time(song.duration, with_hours=False),
]
def _getImageName(self):
return 'song_conflict' if is_conflicted(self.ref.name) else 'song'
def _getChildren(self):
return []
class FolderNode(FSNode):
def _getData(self):
folder = self.ref
parent_volumes = dedupe(song.original.parent_volume for song in folder.iterallfiles())
return [
folder.name,
','.join(l.name for l in parent_volumes),
folder.get_stat('filecount'),
format_size(folder.get_stat('size'), 2, 2, False),
format_time(folder.get_stat('duration')),
]
def _getImageName(self):
return 'folder_conflict' if self.ref.allconflicts else 'folder'
def _createNode(self, ref, row):
if ref.is_container:
return FolderNode(self.model, self, ref, row)
else:
return SongNode(self.model, self, ref, row)
def _getChildren(self):
return self.ref.dirs + self.ref.files
class DummyNode(FSNode):
def _getData(self):
return [''] * 5
def _getImageName(self):
return ''
def _getChildren(self):
return []
class FSModel(TreeModel):
HEADER = ['Name', 'Location', 'Songs', 'Size (MB)', 'Time']
def __init__(self, app, ref, name):
self.app = app
self.ref = ref
self.name = name # the name is going to be the first item in the paths passed around in d&d
TreeModel.__init__(self)
def _createDummyNode(self, parent, row):
return DummyNode(self, parent, None, row)
def _createNode(self, ref, row):
if ref.is_container:
return FolderNode(self, None, ref, row)
else:
return SongNode(self, None, ref, row)
def _getChildren(self):
return self.ref.dirs
def columnCount(self, parent):
return len(self.HEADER)
def data(self, index, role):
if not index.isValid():
return None
node = index.internalPointer()
if role == Qt.DisplayRole:
return node.data[index.column()]
elif role == Qt.DecorationRole:
if index.column() == 0:
return QPixmap(":/{0}".format(node.imageName))
elif role == Qt.EditRole:
if index.column() == 0:
return node.data[index.column()]
return None
def dropMimeData(self, mimeData, action, row, column, parentIndex):
# In the tests I have made, the row and column args always seem to be -1/-1 except when
# parentIndex is invalid (which means that the drop destination is the root node).
def find_path(path):
if path[0] == DESIGN_BOARD_NAME:
return self.app.board.find_path(path[1:])
elif path[0] == IGNORE_BOX_NAME:
return self.app.board.ignore_box.find_path(path[1:])
if not mimeData.hasFormat(MIME_PATHS):
return False
if parentIndex.isValid():
destNode = parentIndex.internalPointer()
else:
destNode = self
paths = str(mimeData.data(MIME_PATHS), 'utf-8').split('\n')
sourceItems = set(find_path(Path(path)) for path in paths)
sourceItems = set(item for item in sourceItems if item.parent not in sourceItems | set([destNode.ref]))
if not sourceItems:
return False
smart_move(sourceItems, destNode.ref, allow_merge=True)
destNode.invalidate()
# InsertRow calls have to be made at correct indexes or else the subsequent removeRows call
# will be made at incorrect indexes. To do so, we just go through every subitem of destNode.ref
# and if it's in sourceItems, we call insertRow.
# destNode.subnodes
for index, node in enumerate(destNode.subnodes):
if node.ref in sourceItems:
self.insertRow(index, parentIndex)
return True
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled | Qt.ItemIsDropEnabled
flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled
if index.column() == 0:
flags |= Qt.ItemIsEditable
return flags
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole and section < len(self.HEADER):
return self.HEADER[section]
return None
def insertRows(self, row, count, parentIndex):
node = parentIndex.internalPointer() if parentIndex.isValid() else self
self.beginInsertRows(parentIndex, row, row + count - 1)
node.invalidate()
self.endInsertRows()
return True
def mimeData(self, indexes):
nodes = dedupe(index.internalPointer() for index in indexes)
paths = [str(self.name + node.ref.path) for node in nodes]
data = '\n'.join(paths).encode('utf-8')
mimeData = QMimeData()
mimeData.setData(MIME_PATHS, QByteArray(data))
return mimeData
def mimeTypes(self):
return [MIME_PATHS]
def removeRows(self, row, count, parentIndex):
node = parentIndex.internalPointer() if parentIndex.isValid() else self
self.beginRemoveRows(parentIndex, row, row + count - 1)
node.invalidate()
self.endRemoveRows()
return True
def refreshNode(self, node):
if node is None:
self.invalidate()
return
node.invalidate(with_subnodes=True)
self.emit(SIGNAL('layoutChanged()'))
def supportedDropActions(self):
return Qt.MoveAction
|
| tiradoe/Giflocker | bin/pilfile.py | Python | lgpl-3.0 | 2,695 | 0.001113 |
#!/Users/tiradoe/Projects/Giflocker/bin/python3
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import logging
import sys
from PIL import Image
if len(sys.argv) == 1:
print("PIL File 0.4/2003-09-30 -- identify image files")
print("Usage: pilfile [option] files...")
print("Options:")
print(" -f list supported file formats")
print(" -i show associated info and tile data")
print(" -v verify file headers")
print(" -q quiet, don't warn for unidentified/missing/broken files")
sys.exit(1)
try:
opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
print(v)
sys.exit(1)
verbose = quiet = verify = 0
logging_level = "WARNING"
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats:")
for i in id:
print(i, end=' ')
sys.exit(1)
elif o == "-i":
verbose = 1
elif o == "-q":
quiet = 1
elif o == "-v":
verify = 1
elif o == "-D":
logging_level = "DEBUG"
logging.basicConfig(level=logging_level)
def globfix(files):
# expand wildcards where necessary
if sys.platform == "win32":
out = []
for file in files:
if glob.has_magic(file):
out.extend(glob.glob(file))
else:
out.append(file)
return out
return files
for file in globfix(args):
try:
im = Image.open(file)
print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
if verbose:
print(im.info, im.tile, end=' ')
print()
if verify:
try:
im.verify()
except:
if not quiet:
print("failed to verify image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
except IOError as v:
if not quiet:
print(file, "failed:", v)
except:
import traceback
if not quiet:
print(file, "failed:", "unexpected error")
traceback.print_exc(file=sys.stdout)
|
| larsbutler/swift | test/unit/account/test_server.py | Python | apache-2.0 | 99,400 | 0.00001 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import mock
import unittest
from tempfile import mkdtemp
from shutil import rmtree
from time import gmtime
from test.unit import FakeLogger
import itertools
import random
import json
from six import BytesIO
from six import StringIO
import xml.dom.minidom
from swift import __version__ as swift_version
from swift.common.swob import (Request, WsgiBytesIO, HTTPNoContent)
from swift.common import constraints
from swift.account.server import AccountController
from swift.common.utils import (normalize_timestamp, replication, public,
mkdirs, storage_directory)
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies, debug_logger
from swift.common.storage_policy import StoragePolicy, POLICIES
@patch_policies
class TestAccountController(unittest.TestCase):
"""Test swift.account.server.AccountController"""
def setUp(self):
"""Set up for testing swift.account.server.AccountController"""
self.testdir_base = mkdtemp()
self.testdir = os.path.join(self.testdir_base, 'account_server')
self.controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
def tearDown(self):
"""Tear down for testing swift.account.server.AccountController"""
try:
rmtree(self.testdir_base)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def test_OPTIONS(self):
server_handler = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = server_handler.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split():
self.assertTrue(
verb in resp.headers['Allow'].split(', '))
self.assertEqual(len(resp.headers['Allow'].split(', ')), 7)
self.assertEqual(resp.headers['Server'],
(server_handler.server_type + '/' + swift_version))
def test_DELETE_not_found(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
def test_DELETE_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_not_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
# We now allow deleting non-empty accounts
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_now_empty(self):
req = Request.blank('/sda1/p/a',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c1',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '2',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_timestamp_not_float(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_REPLICATE_insufficient_storage(self):
conf = {'devices': self.testdir, 'mount_check': 'true'}
self.account_controller = AccountController(conf)
def fake_check_mount(*args, **kwargs):
return False
with mock.patch("swift.common.constraints.check_mount",
fake_check_mount):
req = Request.blank('/sda1/p/suff',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
resp = req.get_response(self.account_controller)
self.assertEqual(resp.status_int, 507)
def test_REPLICATE_works(self):
mkdirs(os.path.join(self.testdir, 'sda1', 'account', 'p', 'a', 'a'))
db_file = os.path.join(self.testdir, 'sda1',
storage_directory('account', 'p', 'a'),
'a' + '.db')
open(db_file, 'w')
def fake_rsync_then_merge(sel
|
| webpp-studio/codestyle | tests/test_system_wrappers.py | Python | gpl-3.0 | 4,814 | 0 |
"""Проверки модуля system_wrappers."""
from logging import INFO
from unittest import TestCase
from unittest.mock import Mock, call, patch
from codestyle import system_wrappers
from codestyle.system_wrappers import (
ExitCodes,
check_output,
interrupt_program_flow,
)
class Test(TestCase):
"""Проверка функций модуля."""
@patch('codestyle.system_wrappers.sys', new_callable=Mock)
@patch.object(system_wrappers, '_logger', new_callable=Mock)
def test_interrupt_program_flow(
self, mocked_logger: Mock, mocked_sys: Mock
):
"""Проверка interrupt_program_flow."""
mock_log = Mock()
mocked_logger.log = mock_log
mock_exit = Mock()
mocked_sys.exit = mock_exit
interrupt_program_flow(log_message='Проверка вызова функции.')
self.assertEqual(True, mock_log.called)
self.assertEqual(1, mock_log.call_count)
args, kwargs = mock_log.call_args
self.assertTupleEqual((INFO, 'Проверка вызова функции.'), args)
self.assertDictEqual({}, kwargs)
self.assertEqual(True, mock_exit.called)
self.assertEqual(1, mock_exit.call_count)
args, kwargs = mock_exit.call_args
self.assertTupleEqual((ExitCodes.SUCCESS,), args)
self.assertDictEqual({}, kwargs)
@patch('codestyle.system_wrappers.check_process_output', new_callable=Mock)
@patch.object(system_wrappers, '_logger', new_callable=Mock)
def test_check_output(
self, mocked_logger: Mock, mocked_process_output_checker: Mock
):
"""Проверка check_output."""
mock_debug = Mock()
mocked_logger.debug = mock_debug
mock_rstrip = Mock()
mock_decode = Mock(return_value=Mock(rstrip=mock_rstrip))
mocked_process_output_checker.return_value = Mock(decode=mock_decode)
check_output(('application', 'run'))
self.assertEqual(True, mock_debug.called)
self.assertEqual(1, mock_debug.call_count)
args, kwargs = mock_debug.call_args
self.assertTupleEqual(
('Проверка наличия application в системе...',), args
)
self.assertDictEqual({}, kwargs)
self.assertEqual(True, mocked_process_output_checker.called)
self.assertEqual(1, mocked_process_output_checker.call_count)
args, kwargs = mocked_process_output_checker.call_args
self.assertTupleEqual((('application', 'run'),), args)
self.assertDictEqual({'timeout': 10}, kwargs)
self.assertEqual(True, mock_decode.called)
self.assertEqual(1, mock_decode.call_count)
args, kwargs = mock_decode.call_args
self.assertTupleEqual((), args)
self.assertDictEqual({}, kwargs)
self.assertEqual(True, mock_rstrip.called)
self.assertEqual(1, mock_rstrip.call_count)
args, kwargs = mock_rstrip.call_args
self.assertTupleEqual((), args)
self.assertDictEqual({}, kwargs)
@patch(
'codestyle.system_wrappers.interrupt_program_flow', new_callable=Mock
)
@patch('codestyle.system_wrappers.check_process_output', new_callable=Mock)
@patch.object(system_wrappers, '_logger', new_callable=Mock)
def test_check_output_with_error(
self,
mocked_logger: Mock,
mocked_process_output_checker: Mock,
mocked_interrupt_program_flow: Mock,
):
"""Проверка check_output с ошибкой внутри."""
mock_debug = Mock()
mock_warning = Mock()
mocked_logger.debug = mock_debug
mocked_logger.warning = mock_warning
mocked_process_output_checker.side_effect = FileNotFoundError(
'Исполняемый файл application не найден.'
)
check_output(('application', 'run'))
self.assertEqual(True, mock_debug.called)
self.assertEqual(2, mock_debug.call_count)
self.assertEqual(1, mock_warning.call_count)
self.assertIn(
call('Проверка наличия application в системе...'),
mock_debug.mock_calls,
)
self.assertIn(
call('Инструмент application не найден.'), mock_warning.mock_calls
)
self.assertIn(
call('Исполняемый файл application не найден.'),
mock_debug.mock_calls,
)
self.assertEqual(True, mocked_interrupt_program_flow.called)
self.assertEqual(1, mocked_interrupt_program_flow.call_count)
args, kwargs = mocked_interrupt_program_flow.call_args
self.assertTupleEqual((ExitCodes.UNSUCCESSFUL,), args)
self.assertDictEqual({}, kwargs)
|
| maartenbreddels/ipyvolume | ipyvolume/hotreload.py | Python | mit | 1,466 | 0.000682 |
from pathlib import Path
import logging
logger = logging.getLogger('ipyvolume')
HERE = Path(__file__).parent
_figures = []
_watching = set()
def _update_shaders(path=None, file_changed=None):
names = ['volr-fragment', 'volr-vertex', 'mesh-vertex', 'mesh-fragment', 'scatter-vertex', 'scatter-fragment', 'shadow-vertex', 'shadow-fragment']
for figure in _figures:
shaders = {}
# TODO: only read the ones we change
for name in names:
shader_path = path / (name + ".glsl")
with shader_path.open() as f:
shaders[name] = f.read()
        figure._shaders = shaders
def watch(figure, path=None):
_figures.append(figure)
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
if path is None:
        # this assumes an editable install (pip install -e .)
path = HERE / '../js/glsl/'
class ShaderEventHandler(FileSystemEventHandler):
def on_modified(self, event):
super(ShaderEventHandler, self).on_modified(event)
if not event.is_directory:
logger.info(f'updating: {event.src_path}')
_update_shaders(path, event.src_path)
observer = Observer()
if path not in _watching:
logger.info(f'watching {path}')
observer.schedule(ShaderEventHandler(), path, recursive=True)
observer.start()
_watching.add(path)
_update_shaders(path)
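# --- Hypothetical usage sketch (not part of the original module) -------------
# Shows how watch() above might be used during development; ipv.figure() is
# ipyvolume's public figure constructor and is assumed here.
if __name__ == '__main__':
    import ipyvolume as ipv

    fig = ipv.figure()  # the figure whose shaders we want to live-reload
    watch(fig)          # re-reads the .glsl sources whenever one changes on disk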
|
csadorf/signac
|
examples/ideal_gas_project/project.py
|
Python
|
bsd-3-clause
| 472
| 0.006356
|
# project.py
import signac
def classify(job):
yield 'init'
if 'V' in job.document:
yield 'volume-computed'
def next_operation(job):
if 'volume-computed' not in classify(job):
return 'compute_volume'
if __name__ == '__main__':
project = signac.get_project()
print(project)
    for job in project.find_jobs():
labels = ','.join(classify(job))
p = '{:04.1f}'.format(job.statepoint()['p'])
print(job, p, labels)
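# --- Hypothetical companion sketch (not part of the original example) --------
# project.py only inspects existing jobs; a separate init script (assumed name:
# init.py) would create the jobs whose state points carry the pressure 'p':
#
#     import signac
#     project = signac.init_project('ideal-gas-project')
#     for p in (0.1, 1.0, 10.0):
#         project.open_job({'p': p}).init()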
|
tensorflow/privacy
|
research/pate_2018/ICLR2018/rdp_cumulative.py
|
Python
|
apache-2.0
| 12,995
| 0.011081
|
# Copyright 2017 The 'Scalable Private Learning with PATE' Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Plots three graphs illustrating cost of privacy per answered query.
A script in support of the paper "Scalable Private Learning with PATE" by
Nicolas Papernot, Shuang Song, Ilya Mironov, Ananth Raghunathan, Kunal Talwar,
Ulfar Erlingsson (https://arxiv.org/abs/1802.08908).
The input is a file containing a numpy array of votes, one query per row, one
class per column. Ex:
43, 1821, ..., 3
31, 16, ..., 0
...
0, 86, ..., 438
The output is written to a specified directory and consists of three pdf files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import pickle
import sys
sys.path.append('..') # Main modules reside in the parent directory.
from absl import app
from absl import flags
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import core as pate
plt.style.use('ggplot')
FLAGS = flags.FLAGS
flags.DEFINE_boolean('cache', False,
'Read results of privacy analysis from cache.')
flags.DEFINE_string('counts_file', None, 'Counts file.')
flags.DEFINE_string('figures_dir', '', 'Path where figures are written to.')
flags.mark_flag_as_required('counts_file')
def run_analysis(votes, mechanism, noise_scale, params):
"""Computes data-dependent privacy.
Args:
votes: A matrix of votes, where each row contains votes in one instance.
mechanism: A name of the mechanism ('lnmax', 'gnmax', or 'gnmax_conf')
noise_scale: A mechanism privacy parameter.
params: Other privacy parameters.
Returns:
Four lists: cumulative privacy cost epsilon, how privacy budget is split,
how many queries were answered, optimal order.
"""
def compute_partition(order_opt, eps):
order_opt_idx = np.searchsorted(orders, order_opt)
if mechanism == 'gnmax_conf':
p = (rdp_select_cum[order_opt_idx],
rdp_cum[order_opt_idx] - rdp_select_cum[order_opt_idx],
-math.log(delta) / (order_opt - 1))
else:
p = (rdp_cum[order_opt_idx], -math.log(delta) / (order_opt - 1))
return [x / eps for x in p] # Ensures that sum(x) == 1
# Short list of orders.
# orders = np.round(np.concatenate((np.arange(2, 50 + 1, 1),
# np.logspace(np.log10(50), np.log10(1000), num=20))))
# Long list of orders.
orders = np.concatenate((np.arange(2, 100 + 1, .5),
np.logspace(np.log10(100), np.log10(500), num=100)))
delta = 1e-8
n = votes.shape[0]
eps_total = np.zeros(n)
partition = [None] * n
order_opt = np.full(n, np.nan, dtype=float)
answered = np.zeros(n, dtype=float)
rdp_cum = np.zeros(len(orders))
rdp_sqrd_cum = np.zeros(len(orders))
rdp_select_cum = np.zeros(len(orders))
answered_sum = 0
for i in range(n):
v = votes[i,]
if mechanism == 'lnmax':
logq_lnmax = pate.compute_logq_laplace(v, noise_scale)
rdp_query = pate.rdp_pure_eps(logq_lnmax, 2. / noise_scale, orders)
rdp_sqrd = rdp_query ** 2
pr_answered = 1
elif mechanism == 'gnmax':
logq_gmax = pate.compute_logq_gaussian(v, noise_scale)
rdp_query = pate.rdp_gaussian(logq_gmax, noise_scale, orders)
rdp_sqrd = rdp_query ** 2
pr_answered = 1
elif mechanism == 'gnmax_conf':
logq_step1 = pate.compute_logpr_answered(params['t'], params['sigma1'], v)
logq_step2 = pate.compute_logq_gaussian(v, noise_scale)
q_step1 = np.exp(logq_step1)
logq_step1_min = min(logq_step1, math.log1p(-q_step1))
rdp_gnmax_step1 = pate.rdp_gaussian(logq_step1_min,
2 ** .5 * params['sigma1'], orders)
rdp_gnmax_step2 = pate.rdp_gaussian(logq_step2, noise_scale, orders)
rdp_query = rdp_gnmax_step1 + q_step1 * rdp_gnmax_step2
# The expression below evaluates
# E[(cost_of_step_1 + Bernoulli(pr_of_step_2) * cost_of_step_2)^2]
rdp_sqrd = (
rdp_gnmax_step1 ** 2 + 2 * rdp_gnmax_step1 * q_step1 * rdp_gnmax_step2
+ q_step1 * rdp_gnmax_step2 ** 2)
rdp_select_cum += rdp_gnmax_step1
pr_answered = q_step1
else:
raise ValueError(
'Mechanism must be one of ["lnmax", "gnmax", "gnmax_conf"]')
rdp_cum += rdp_query
rdp_sqrd_cum += rdp_sqrd
answered_sum += pr_answered
answered[i] = answered_sum
eps_total[i], order_opt[i] = pate.compute_eps_from_delta(
orders, rdp_cum, delta)
partition[i] = compute_partition(order_opt[i], eps_total[i])
if i > 0 and (i + 1) % 1000 == 0:
rdp_var = rdp_sqrd_cum / i - (
rdp_cum / i) ** 2 # Ignore Bessel's correction.
order_opt_idx = np.searchsorted(orders, order_opt[i])
eps_std = ((i + 1) * rdp_var[order_opt_idx]) ** .5 # Std of the sum.
print(
'queries = {}, E[answered] = {:.2f}, E[eps] = {:.3f} (std = {:.5f}) '
'at order = {:.2f} (contribution from delta = {:.3f})'.format(
i + 1, answered_sum, eps_total[i], eps_std, order_opt[i],
-math.log(delta) / (order_opt[i] - 1)))
sys.stdout.flush()
return eps_total, partition, answered, order_opt
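# --- Hypothetical call sketch (not part of the original script) --------------
# Elsewhere the votes matrix is loaded from FLAGS.counts_file; on a toy random
# matrix the analysis could be exercised like this (values are illustrative):
#
#     votes = np.random.randint(0, 1000, size=(500, 10))
#     eps, partition, answered, order_opt = run_analysis(
#         votes, 'gnmax', noise_scale=100.0, params={})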
def print_plot_small(figures_dir, eps_lap, eps_gnmax, answered_gnmax):
"""Plots a graph of LNMax vs GNMax.
Args:
figures_dir: A name of the directory where to save the plot.
    eps_lap: The cumulative privacy costs of the Laplace mechanism.
eps_gnmax: The cumulative privacy costs of the Gaussian mechanism
answered_gnmax: The cumulative count of queries answered.
"""
xlim = 6000
x_axis = range(0, int(xlim), 10)
y_lap = np.zeros(len(x_axis), dtype=float)
y_gnmax = np.full(len(x_axis), np.nan, dtype=float)
for i in range(len(x_axis)):
x = x_axis[i]
y_lap[i] = eps_lap[x]
idx = np.searchsorted(answered_gnmax, x)
    if idx < len(eps_gnmax):
y_gnmax[i] = eps_gnmax[idx]
fig, ax = plt.subplots()
fig.set_figheight(4.5)
fig.set_figwidth(4.7)
ax.plot(
x_axis, y_lap, color='r', ls='--', label='LNMax', alpha=.5, linewidth=5)
ax.plot(
x_axis,
y_gnmax,
color='g',
ls='-',
label='Confident-GNMax',
alpha=.5,
linewidth=5)
plt.xticks(np.arange(0, 7000, 1000))
plt.xlim([0, 6000])
plt.ylim([0, 6.])
plt.xlabel('Number of queries answered', fontsize=16)
plt.ylabel(r'Privacy cost $\varepsilon$ at $\delta=10^{-8}$', fontsize=16)
plt.legend(loc=2, fontsize=13) # loc=2 -- upper left
ax.tick_params(labelsize=14)
fout_name = os.path.join(figures_dir, 'lnmax_vs_gnmax.pdf')
print('Saving the graph to ' + fout_name)
fig.savefig(fout_name, bbox_inches='tight')
plt.show()
def print_plot_large(figures_dir, eps_lap, eps_gnmax1, answered_gnmax1,
eps_gnmax2, partition_gnmax2, answered_gnmax2):
"""Plots a graph of LNMax vs GNMax with two parameters.
Args:
figures_dir: A name of the directory where to save the plot.
eps_lap: The cumulative privacy costs of the Laplace mechanism.
eps_gnmax1: The cumulative privacy costs of the Gaussian mechanism (set 1).
answered_gnmax1: The cumulative count of queries answered (set 1).
eps_gnmax2: The cumulative privacy costs of the Gaussian mechanism (set 2).
partition_gnmax2: Allocation of eps for set 2.
answered_gnmax2: The cumulative count of queries answered (set 2).
"""
xlim = 6000
x_axis = range(0, int(xlim), 10)
lenx = len(x_axis)
y_lap = np.zer
|
nealedj/djangae
|
djangae/fields/iterable.py
|
Python
|
bsd-3-clause
| 10,309
| 0.003104
|
import copy
from django import forms
from django.db import models
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.db.models.fields.subclassing import Creator
from djangae.forms.fields import ListFormField
from django.utils.text import capfirst
class _FakeModel(object):
"""
An object of this class can pass itself off as a model instance
when used as an arguments to Field.pre_save method (item_fields
of iterable fields are not actually fields of any model).
"""
def __init__(self, field, value):
setattr(self, field.attname, value)
class IterableField(models.Field):
__metaclass__ = models.SubfieldBase
@property
def _iterable_type(self): raise NotImplementedError()
def db_type(self, connection):
return 'list'
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if value is None:
raise ValueError("You can't query an iterable field with None")
if lookup_type == 'isnull' and value in (True, False):
return value
if lookup_type != 'exact' and lookup_type != 'in':
raise ValueError("You can only query using exact and in lookups on iterable fields")
if isinstance(value, (list, set)):
return [ self.item_field_type.to_python(x) for x in value ]
return self.item_field_type.to_python(value)
def get_prep_value(self, value):
if value is None:
raise ValueError("You can't set a {} to None (did you mean {}?)".format(
self.__class__.__name__, str(self._iterable_type())
))
if isinstance(value, basestring):
# Catch accidentally assigning a string to a ListField
raise ValueError("Tried to assign a string to a {}".format(self.__class__.__name__))
return super(IterableField, self).get_prep_value(value)
def __init__(self, item_field_type, *args, **kwargs):
        # This seems bonkers: we shout at people for specifying null=True, but then do it ourselves. That's because
        # *we* abuse None values for our own purposes (to represent an empty iterable); if someone else tries to do
        # the same, all hell breaks loose.
if kwargs.get("null", False):
raise RuntimeError("IterableFields cannot be set as nullable (as the datastore doesn't differentiate None vs []")
kwargs["null"] = True
default = kwargs.get("default", [])
self._original_item_field_type = copy.deepcopy(item_field_type) # For deconstruction purposes
if default is not None and not callable(default):
kwargs["default"] = lambda: self._iterable_type(default)
if hasattr(item_field_type, 'attname'):
item_field_type = item_field_type.__class__
if callable(item_field_type):
item_field_type = item_field_type()
if isinstance(item_field_type, models.ForeignKey):
raise ImproperlyConfigured("Lists of ForeignKeys aren't supported, use RelatedSetField instead")
self.item_field_type = item_field_type
# We'll be pretending that item_field is a field of a model
# with just one "value" field.
assert not hasattr(self.item_field_type, 'attname')
self.item_field_type.set_attributes_from_name('value')
super(IterableField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(IterableField, self).deconstruct()
args = (self._original_item_field_type,)
del kwargs["null"]
return name, path, args, kwargs
def contribute_to_class(self, cls, name):
self.item_field_type.model = cls
self.item_field_type.name = name
super(IterableField, self).contribute_to_class(cls, name)
# If items' field uses SubfieldBase we also need to.
item_metaclass = getattr(self.item_field_type, '__metaclass__', None)
if item_metaclass and issubclass(item_metaclass, models.SubfieldBase):
setattr(cls, self.name, Creator(self))
def _map(self, function, iterable, *args, **kwargs):
return self._iterable_type(function(element, *args, **kwargs) for element in iterable)
def to_python(self, value):
if value is None:
return self._iterable_type([])
# Because a set cannot be defined in JSON, we must allow a list to be passed as the value
# of a SetField, as otherwise SetField data can't be loaded from fixtures
if not hasattr(value, "__iter__"): # Allows list/set, not string
raise ValueError("Tried to assign a {} to a {}".format(value.__class__.__name__, self.__class__.__name__))
return self._map(self.item_field_type.to_python, value)
def pre_save(self, model_instance, add):
"""
Gets our value from the model_instance and passes its items
through item_field's pre_save (using a fake model instance).
"""
value = getattr(model_instance, self.attname)
if value is None:
return None
return self._map(lambda item: self.item_field_type.pre_save(_FakeModel(self.item_field_type, item), add), value)
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
if value is None:
return None
# If the value is an empty iterable, store None
if value == self._iterable_type([]):
return None
return self._map(self.item_field_type.get_db_prep_save, value,
connection=connection)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Passes the value through get_db_prep_lookup of item_field.
"""
return self.item_field_type.get_db_prep_lookup(
lookup_type, value, connection=connection, prepared=prepared)
def validate(self, value_list, model_instance):
""" We want to override the default validate method from django.db.fields.Field, because it
is only designed to deal with a single choice from the user.
"""
if not self.editable:
# Skip validation for non-editable fields
return
# Validate choices
if self.choices:
valid_values = []
for choice in self.choices:
if isinstance(choice[0], (list, tuple)):
# this is an optgroup, so look inside it for the options
for optgroup_choice in choice[0]:
valid_values.append(optgroup_choice[0])
else:
valid_values.append(choice[0])
            for value in value_list:
if value not in valid_values:
# TODO: if there is more than 1 invalid value then this should show all of the invalid values
raise ValidationError(self.error_messages['invalid_choice'] % value)
# Validate null-ness
if value_list is None and not self.null:
raise ValidationError(self.error_messages['null'])
if not self.blank and not value_list:
raise ValidationError(self.error_messages['blank'])
# apply the default items validation rules
for value in value_list:
self.item_field_type.clean(value, model_instance)
def formfield(self, **kwargs):
""" If this field has choices, then we can use a multiple choice field.
NB: The choices must be set on *this* field, e.g. this_field = ListField(CharField(), choices=x)
as opposed to: this_field = ListField(CharField(choices=x))
"""
#Largely lifted straight from Field.formfield() in django.models.__init__.py
defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
if self.has_default(): #No idea what this does
|
wonderful4228/qualitybots
|
src/appengine/handlers/base.py
|
Python
|
apache-2.0
| 5,928
| 0.003036
|
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all request handlers.
Provides functionality useful to all request handlers, including extraction and
validation of request parameters.
"""
import os
import urllib2
# pylint: disable-msg=C6204
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
class Error(urllib2.HTTPError):
"""Base class for all exceptions defined in this module."""
pass
class MissingRequiredParameterError(Error):
"""Raised when the request is missing a required parameter."""
def __init__(self, parameter_name):
msg = 'Request missing required parameter: %s' % parameter_name
Error.__init__(self, code=400, msg=msg, url='', hdrs='', fp=None)
class InvalidIntValueError(Error):
"""Raised when a request parameter is expected to be an int, but it isn't."""
def __init__(self, parameter_name, parameter_value):
msg = ('The specified value for parameter "%s" is not '
'a valid int: %s' % (parameter_name, parameter_value))
Error.__init__(self, code=400, msg=msg, url='', hdrs='', fp=None)
class InvalidParameterValueError(Error):
"""Raised when a request parameter has an invalid value."""
def __init__(self, parameter_name, parameter_value):
msg = ('The specified value for parameter "%s" is not '
'valid: %s' % (parameter_name, parameter_value))
Error.__init__(self, code=400, msg=msg, url='', hdrs='', fp=None)
class BaseHandler(webapp.RequestHandler):
"""Base class for the application handlers.
Defines common functionality used by various handlers. As a rule of thumb,
most code that extracts and validates parameters from the request belongs to
this class.
If any of the validations fails, one of the exceptions defined in this module
is raised; all of which inherits from the Error class, also defined in this
module.
The most basic type of retrieval is to retrieve an optional str
argument from the request. This is accomplished by calling
GetOptionalParameter, for example:
value = self.GetOptionalParameter('optional_param_name')
value = self.GetOptionalParameter('optional_param_name', 'default_value')
If the parameter is required by the request handler, this can be enforced
by calling GetRequiredParameter, for example
value = self.GetRequiredParameter('required_param_name')
In addition to enforcing whether a parameter is required or not, there are
variations to enforce the parameter value is of a specific type. Some of
the methods we have implemented at the moment retrieve an optional int
and a required URL, for example:
# Note that 10 is just an optional default value.
value = self.GetOptionalIntParameter('int_parameter_name', 10)
"""
def GetRequiredParameter(self, parameter_name):
"""Retrieves the value of a required request parameter.
Args:
parameter_name: Name of the parameter to get from the request.
Returns:
The value of the specified parameter as a str.
Raises:
MissingRequiredParameterError: The specified parameter was not found in
the request.
"""
str_value = self.GetOptionalParameter(parameter_name)
if not str_value:
raise MissingRequiredParameterError(parameter_name)
return str_value
def GetOptionalParameter(self, parameter_name, default_value=None):
"""Retrieves the value of an optional request parameter.
Args:
parameter_name: Name of the parameter to get from the request.
default_value: Value to return if the parameter is not found.
Returns:
The value of the specified parameter as a str, or default_value
if the parameter was not present in the request.
"""
return self.request.get(parameter_name, default_value)
def GetOptionalIntParameter(self, parameter_name, default_value):
"""Retrieves the value of an optional request parameter.
Args:
parameter_name: Name of the parameter to get from the request.
default_value: Value to return if the parameter is not found.
Returns:
An int object with the value of the specified parameter as a str.
Raises:
InvalidIntValueError: The value of the specified parameter is
not a valid integer number.
"""
str_value = self.GetOptionalParameter(parameter_name)
# If the following line raises a ValueError, the calling code
# has a bug where they passed an invalid default_value. We let
# that exception propagate, causing a 500 response to client and
# sufficient error logging.
if not str_value:
return int(default_value)
try:
return int(str_value)
except ValueError:
raise InvalidIntValueError(parameter_name, str_value)
def RenderTemplate(self, name, template_args):
"""Renders the specified djan
|
go template.
    Assumes the handler and templates are in different folders:
- root
- handlers
- templates
Args:
name: Str name of the file template.
template_args: Dict argument passed to the template.
"""
path = os.path.join(os.path.dirname(__file__), '..', 'templates', name)
self.response.out.write(template.render(path, template_args))
# Register filter
template.register_template_library(
'filters.filters')
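# --- Hypothetical subclass sketch (ExampleHandler is not part of this module) -
# Illustrates the parameter helpers documented in the BaseHandler docstring;
# 'example.html' is an assumed template name.
class ExampleHandler(BaseHandler):
  """Example handler using the request-parameter helpers."""

  def get(self):
    name = self.GetRequiredParameter('name')           # MissingRequiredParameterError (400) if absent
    limit = self.GetOptionalIntParameter('limit', 10)  # InvalidIntValueError (400) if not an int
    self.RenderTemplate('example.html', {'name': name, 'limit': limit})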
|
studentenportal/web
|
apps/documents/migrations/0004_remove_document_flattr_disabled.py
|
Python
|
agpl-3.0
| 342
| 0
|
# Generated by Django 2.2.11 on 2020-03-12 11:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("documents", "0003_auto_20200122_1624"),
]
operations = [
migrations.RemoveField(
model_name="document",
name="flattr_disabled",
),
]
|
opendatakosovo/municipality-procurement-visualizer
|
gpv/views/json/procurementtype.py
|
Python
|
gpl-2.0
| 764
| 0.005236
|
from flask import Response
from flask.views import View
from urllib2 import urlopen
from gpv import utils
class ProcurementType(View):
def dispatch_request(self, komuna=None, year=None, company_slug=None):
api_base_url = utils.get_api_url()
url = "%s/procurement-type" % api_base_url
result = []
if komuna != None and year != None:
url = url + "/%s/%d" % (komuna, year)
result = urlopen(url).read()
elif company_slug != None:
url = url + "/%s" % (company_slug)
result = urlopen(url).read()
        # Build response object.
resp = Response(
response=result,
mimetype='application/json')
# Return response.
return resp
|
arrabito/DIRAC
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 9,724
| 0.015014
|
# -*- coding: utf-8 -*-
#
# DiracDocs documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 25 17:34:37 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import sys
import subprocess
sys.path.insert(0, ".")
try:
import fakeEnvironment
except ImportError:
pass
try:
import fakeEnv
except ImportError:
pass
diracRelease = os.environ.get( 'DIRACVERSION', 'integration' )
if os.environ.get('READTHEDOCS') == 'True':
diracRelease = os.path.basename( os.path.abspath( "../../" ) )
if diracRelease.startswith("rel-"):
diracRelease = diracRelease[4:]
print 'conf.py: %s as DIRACVERSION' % diracRelease
#...............................................................................
# configuration
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
if os.environ.get('READTHEDOCS') == 'True':
sys.path.append(os.path.abspath('.'))
diracPath = os.path.abspath( os.path.join( os.getcwd(), "../..") )
print "DiracPath",diracPath
buildfolder ="_build"
try:
os.mkdir( os.path.abspath( "../"+buildfolder) )
except:
pass
##We need to have the DIRAC module somewhere, or we cannot import it, as readtheDocs clones the repo into something based on the branchname
if not os.path.exists( "../../DIRAC" ):
diracLink = os.path.abspath( os.path.join( os.getcwd() , "../" , buildfolder, "DIRAC" ) )
print "DiracLink",diracLink
if not os.path.exists( diracLink ):
RES = subprocess.check_output( ["ln","-s", diracPath, diracLink ] )
diracPath = os.path.abspath( os.path.join( diracLink, ".." ) )
sys.path.insert(0, diracPath)
for path in sys.path:
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '')+":"+path
## this is not working at the moment because the DIRAC folder is not found by the buildScriptsDOC script
# print "Pythonpath",os.environ['PYTHONPATH']
# buildCommand = os.path.join( os.getcwd() , "../Tools/buildScriptsDOC.py" )
# scriptdir = os.path.abspath(os.path.join( os.getcwd() , "../", buildfolder, "scripts" ))
# try:
# os.mkdir( scriptdir )
# except:
# pass
# print "command", buildCommand
# code = subprocess.Popen( ["python", buildCommand, scriptdir ], env = os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout , err = code.communicate()
# print "script",stdout
# print "script",err
os.environ["DIRAC"] = diracPath
print "DIRAC ENVIRON", os.environ["DIRAC"]
##singlehtml build needs too much memory, so we need to create less code documentation
buildtype = "limited" if any("singlehtml" in arg for arg in sys.argv ) else "full"
print "Chosing build type:", buildtype
buildCommand =os.path.join( os.getcwd() , "../Tools/MakeDoc.py" )
code = subprocess.Popen( ["python",buildCommand, buildtype], env = os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout , err = code.communicate()
print "code",stdout
print "code",err
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DIRAC'
copyright = u'%s, DIRAC Project' % datetime.datetime.utcnow().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = diracRelease
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%H:%M %d/%m/%Y %Z'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
#ADRI: Ignore old stuff that is not included in the compilation
exclude_trees = [ 'AdministratorGuide/Configuration/ConfigurationReference' ]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
html_style = 'dirac.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {
# 'sidebarbgcolor':'#D5E2F2'
#}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<relea
|
se> documentation".
html_title = "DIRAC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/DIRAC-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d/%m/%Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value
|
AleksNeStu/ggrc-core
|
src/ggrc/fulltext/mixin.py
|
Python
|
apache-2.0
| 2,559
| 0.007816
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module contains Indexed mixin class"""
import itertools
from collections import namedtuple
from sqlalchemy import inspect, orm
from ggrc import db
from ggrc import fulltext
ReindexRule = namedtuple("ReindexRule", ["model", "rule"])
# pylint: disable=too-few-public-methods
class Indexed(object):
"""Mixin for Index And auto reindex current model instance"""
AUTO_REINDEX_RULES = [
# Usage: ReindexRule("ModelName", lambda x: x.value)
]
PROPERTY_TEMPLATE = u"{}"
def delete_record(self):
fulltext.get_indexer().delete_record(
self.id,
self.__class__.__name__,
False
)
def create_record(self):
indexer = fulltext.get_indexer()
indexer.create_record(indexer.fts_record_for(self), False)
def update_indexer(self):
"""Update indexer for current instance"""
if self.__class__.__name__ not in fulltext.get_indexed_model_names():
return
self.delete_record()
self.create_record()
def get_reindex_pair(self):
return (self.__class__.__name__, self.id)
@classmethod
def get_insert_query_for(cls, ids):
"""Return insert class record query. It will return None, if it's empty."""
if not ids:
return
    instances = cls.indexed_query().filter(cls.id.in_(ids))
indexer = fulltext.get_indexer()
    keys = inspect(indexer.record_type).c
records = (indexer.fts_record_for(i) for i in instances)
rows = itertools.chain(*[indexer.records_generator(i) for i in records])
values = [{c.name: getattr(r, a) for a, c in keys.items()} for r in rows]
if values:
return indexer.record_type.__table__.insert().values(values)
@classmethod
def get_delete_query_for(cls, ids):
"""Return delete class record query. If ids are empty, will return None."""
if not ids:
return
indexer = fulltext.get_indexer()
return indexer.record_type.__table__.delete().where(
indexer.record_type.type == cls.__name__
).where(
indexer.record_type.key.in_(ids)
)
@classmethod
def bulk_record_update_for(cls, ids):
"""Bulky update index records for current class"""
delete_query = cls.get_delete_query_for(ids)
insert_query = cls.get_insert_query_for(ids)
for query in [delete_query, insert_query]:
if query is not None:
db.session.execute(query)
@classmethod
def indexed_query(cls):
return cls.query.options(
orm.Load(cls).load_only("id"),
)
|
alvin319/CarnotKE
|
jyhton/Lib/test/test_glob.py
|
Python
|
apache-2.0
| 7,373
| 0
|
import glob
import os
import shutil
import sys
import unittest
import warnings
from test.test_support import run_unittest, TESTFN
def fsdecode(s):
return unicode(s, sys.getfilesystemencoding())
class GlobTests(unittest.TestCase):
def norm(self, *parts):
return os.path.normpath(os.path.join(self.tempdir, *parts))
def mktemp(self, *parts):
filename = self.norm(*parts)
base, file = os.path.split(filename)
if not os.path.exists(base):
os.makedirs(base)
f = open(filename, 'w')
f.close()
def setUp(self):
self.tempdir = TESTFN + "_dir"
self.mktemp('a', 'D')
self.mktemp('aab', 'F')
self.mktemp('.aa', 'G')
self.mktemp('.bb', 'H')
self.mktemp('aaa', 'zzzF')
self.mktemp('ZZZ')
self.mktemp('a', 'bcd', 'EF')
self.mktemp('a', 'bcd', 'efg', 'ha')
if hasattr(os, 'symlink'):
os.symlink(self.norm('broken'), self.norm('sym1'))
os.symlink('broken', self.norm('sym2'))
os.symlink(os.path.join('a', 'bcd'), self.norm('sym3'))
def tearDown(self):
try:
shutil.rmtree(self.tempdir)
except OSError:
warnings.warn("Failed to remove " + self.tempdir)
def glob(self, *parts):
if len(parts) == 1:
pattern = parts[0]
else:
pattern = os.path.join(*parts)
p = os.path.join(self.tempdir, pattern)
res = glob.glob(p)
self.assertEqual(list(glob.iglob(p)), res)
ures = [fsdecode(x) for x in res]
self.assertEqual(glob.glob(fsdecode(p)), ures)
self.assertEqual(list(glob.iglob(fsdecode(p))), ures)
return res
def assertSequencesEqual_noorder(self, l1, l2):
l1 = list(l1)
l2 = list(l2)
self.assertEqual(set(l1), set(l2))
self.assertEqual(sorted(l1), sorted(l2))
def test_glob_literal(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('a'), [self.norm('a')])
eq(self.glob('a', 'D'), [self.norm('a', 'D')])
eq(self.glob('aab'), [self.norm('aab')])
eq(self.glob('zymurgy'), [])
res = glob.glob('*')
# For a clean checkout, the next two assertions would never
# have failed, even with the change with Jython in
# https://hg.python.org/jython/rev/ea036792f304
#
# But for developers playing with things, we should not have
# it fail either
self.assertLessEqual({type(r) for r in res}, {str, unicode})
res = glob.glob(os.path.join(os.curdir, '*'))
self.assertLessEqual({type(r) for r in res}, {str, unicode})
# test return types are unicode, but only if os.listdir
# returns unicode filenames
tmp = os.listdir(fsdecode(os.curdir))
if {type(x) for x in tmp} == {unicode}:
res = glob.glob(u'*')
self.assertEqual({type(r) for r in res}, {unicode})
res = glob.glob(os.path.join(fsdecode(os.curdir), u'*'))
self.assertEqual({type(r) for r in res}, {unicode})
def test_glob_one_directory(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
eq(self.glob('.*'), map(self.norm, ['.aa', '.bb']))
eq(self.glob('?aa'), map(self.norm, ['aaa']))
eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
eq(self.glob('*q'), [])
def test_glob_nested_directory(self):
eq = self.assertSequencesEqual_noorder
if os.path.normcase("abCD") == "abCD":
# case-sensitive filesystem
eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')])
else:
# case insensitive filesystem
eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'),
self.norm('a', 'bcd', 'efg')])
eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')])
def test_glob_directory_names(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('*', 'D'), [self.norm('a', 'D')])
eq(self.glob('*', '*a'), [])
eq(self.glob('a', '*', '*', '*a'),
[self.norm('a', 'bcd', 'efg', 'ha')])
eq(self.glob('?a?', '*F'), [self.norm('aaa', 'zzzF'),
self.norm('aab', 'F')])
def test_glob_directory_with_trailing_slash(self):
# Patterns ending with a slash shouldn't match non-dirs
res = glob.glob(self.norm('Z*Z') + os.sep)
self.assertEqual(res, [])
res = glob.glob(self.norm('ZZZ') + os.sep)
self.assertEqual(res, [])
# When there is a wildcard pattern which ends with os.sep, glob()
# doesn't blow up.
res = glob.glob(self.norm('aa*') + os.sep)
self.assertEqual(len(res), 2)
# either of these results is reasonable
self.assertIn(set(res), [
{self.norm('aaa'), self.norm('aab')},
{self.norm('aaa') + os.sep, self.norm('aab') + os.sep},
])
def test_glob_unicode_directory_with_trailing_slash(self):
        # Same as test_glob_directory_with_trailing_slash, but with a
        # unicode argument.
res = glob.glob(fsdecode(self.norm('Z*Z') + os.sep))
self.assertEqual(res, [])
res = glob.glob(fsdecode(self.norm('ZZZ') + os.sep))
self.assertEqual(res, [])
res = glob.glob(fsdecode(self.norm('aa*') + os.sep))
self.assertEqual(len(res), 2)
# either of these results is reasonable
self.assertIn(set(res), [
{fsdecode(self.norm('aaa')), fsdecode(self.norm('aab'))},
{fsdecode(self.norm('aaa') + os.sep),
             fsdecode(self.norm('aab') + os.sep)},
])
@unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
def test_glob_symlinks(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('sym3'), [self.norm('sym3')])
eq(self.glob('sym3', '*'), [self.norm('sym3', 'EF'),
self.norm('sym3', 'efg')])
        self.assertIn(self.glob('sym3' + os.sep),
[[self.norm('sym3')], [self.norm('sym3') + os.sep]])
eq(self.glob('*', '*F'),
[self.norm('aaa', 'zzzF'), self.norm('aab', 'F'),
self.norm('sym3', 'EF')])
@unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
def test_glob_broken_symlinks(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2'),
self.norm('sym3')])
eq(self.glob('sym1'), [self.norm('sym1')])
eq(self.glob('sym2'), [self.norm('sym2')])
@unittest.skipUnless(sys.platform == "win32", "Win32 specific test")
def test_glob_magic_in_drive(self):
eq = self.assertSequencesEqual_noorder
eq(glob.glob('*:'), [])
eq(glob.glob(u'*:'), [])
eq(glob.glob('?:'), [])
eq(glob.glob(u'?:'), [])
def test_main():
run_unittest(GlobTests)
if __name__ == "__main__":
test_main()
|
Depado/starmato-admin
|
starmato/admin/templatetags/_fieldset_related.py
|
Python
|
mit
| 976
| 0.008197
|
# -*- coding: utf-8 -*-
def before_related(adminform):
adminform.fieldsets_before = adminform.fieldsets
adminform.fieldsets_after = []
try:
adminform.fieldsets_before = adminform.fieldsets[:adminform.fieldsets.index(('related_go_here', {'fields': []}))]
adminform.fieldsets_after = adminform.fieldsets[adminform.fieldsets.index(('related_go_here', {'fields': []}))+1:]
adminform.fieldsets = adminform.fieldsets_before
        return adminform
except:
return adminform
def after_related(adminform):
try:
adminform.fieldsets = adminform.fieldsets_after
adminform.fieldsets_before = adminform.fieldsets[:adminform.fieldsets.index(('related_go_here', {'fields': []}))]
adminform.fieldsets_after = adminform.fieldsets[adminform.fieldsets.index(('related_go_here', {'fields': []}))+1:]
adminform.fieldsets = adminform.fieldsets_after
return adminform
except:
return adminform
|
AdL1398/PiCasso
|
source/modules/tester/testtermopi.py
|
Python
|
mit
| 1,603
| 0.009981
|
#!/usr/bin/python
"""
title : testtermopi.py
description : This program runs the termopi.py
: Displays the status of the resources (cpu load and memory usage) consumed by a Raspberry Pi
computer and the resources consumed by one or more containers instantiated in the Pi.
source            :
author : Carlos Molina-Jimenez (Carlos.Molina@cl.cam.ac.uk)
date              : 27 Mar 2017
institution : Computer Laboratory, University of Cambridge
version : 1.0
usage :
notes :
compile and run : % python termopi.py
: It imports pidict.py, dockerctl.py and picheck.py which are found in
: ./modules.
: You need to include "./modules" in the PYTHONPATH environment variable to
: indicate python where to find the pidict.py, dockerctl.py and picheck.py.
: For example, in a bash shell, you need to include the following lines
: in your .bash_profile file located in you home directory (you can see it with
: (# ls -la).
:
: PYTHONPATH="./modules"
: export PYTHONPATH
python_version : Python 2.7.12
====================================================
"""
from modules.tools.termopi import termopi # class with dictionary data structure
# Threshold of cpu exhaustion
cpuUsageThreshold= 50
cpuLoadThreshold= 3
termo= termopi()
termo.prt_pi_resources()
termo.create_jsonfile_with_pi_status()
#termo.check_pi_resource_status(cpuUsageThreshold)
|
jck/uhdl
|
setup.py
|
Python
|
bsd-3-clause
| 1,201
| 0
|
from setuptools import setup
reqs = [
'myhdl>=0.9.0',
'click',
'wrapt'
]
test_reqs = ['pytest', 'hypothesis']
requires = {
'setup_requires': ['setuptools_scm'],
'install_requires': reqs,
'tests_require': test_reqs,
'extras_require': {
'testing': test_reqs,
}
}
setup(
name='uhdl',
use_scm_version=True,
description='Python Hardware Description for Humans.',
long_description=open('README.md').read(),
url='https://github.com/jck/uhdl',
author='Keerthan Jaic',
author_email='jckeerthan@gmail.com',
license="BSD",
packages=['uhdl'],
entry_points={
'console_scripts': [
'uhdl = uhdl.cli:cli'
]
},
zip_safe=False,
classifiers=[
'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)'
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
keywords='myhdl uhdl',
**requires
)
|
PaddlePaddle/Paddle
|
python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py
|
Python
|
apache-2.0
| 1,457
| 0
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import seresnext_net
from seresnext_test_base import TestResnetBase, DeviceType
from functools import partial
class TestResnetGPU(TestResnetBase):
    def test_seresnext_with_learning_rate_decay(self):
# NOTE(zcd): This test is compare the result of use parallel_executor
# and executor, and the result of drop_out op and batch_norm op in
# this two executor have diff, so the two ops should be removed
# from the model.
check_func = partial(
self.check_network_convergence,
optimizer=seresnext_net.optimizer,
use_parallel_executor=False)
self._compare_result_with_origin_model(
check_func, use_device=DeviceType.CUDA, compare_seperately=False)
if __name__ == '__main__':
unittest.main()
|
stefanklug/mapnik
|
scons/scons-local-2.3.6/SCons/Variables/PathVariable.py
|
Python
|
lgpl-2.1
| 5,646
| 0.000886
|
"""SCons.Variables.PathVariable
This file defines an option type for SCons implementing path settings.
To be used whenever a user-specified path override should be allowed.
Arguments to PathVariable are:
option-name = name of this option on the command line (e.g. "prefix")
option-help = help string for option
option-dflt = default value for this option
validator = [optional] validator for option value. Predefined
validators are:
PathAccept -- accepts any path setting; no validation
PathIsDir -- path must be an existing directory
PathIsDirCreate -- path must be a dir; will create
PathIsFile -- path must be a file
PathExists -- path must exist (any type) [default]
The validator is a function that is called and which
should return True or False to indicate if the path
is valid. The arguments to the validator function
are: (key, val, env). The key is the name of the
option, the val is the path specified for the option,
                and the env is the environment to which the Options have been
added.
Usage example:
Examples:
prefix=/usr/local
opts = Variables()
opts = Variables()
opts.Add(PathVariable('qtdir',
'where the root of Qt is installed',
qtdir, PathIsDir))
opts.Add(PathVariable('qt_includes',
'where the Qt includes are installed',
'$qtdir/includes', PathIsDirCreate))
opts.Add(PathVariable('qt_libraries',
'where the Qt library is installed',
'$qtdir/lib'))
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software
|
"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PathVariable.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
__all__ = ['PathVariable',]
import os
import os.path
import SCons.Errors
class _PathVariableClass(object):
def PathAccept(self, key, val, env):
"""Accepts any path, no checking done."""
pass
def PathIsDir(self, key, val, env):
"""Validator to check if Path is a directory."""
if not os.path.isdir(val):
if os.path.isfile(val):
m = 'Directory path for option %s is a file: %s'
else:
m = 'Directory path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def PathIsDirCreate(self, key, val, env):
"""Validator to check if Path is a directory,
creating it if it does not exist."""
if os.path.isfile(val):
m = 'Path for option %s is a file, not a directory: %s'
raise SCons.Errors.UserError(m % (key, val))
if not os.path.isdir(val):
os.makedirs(val)
def PathIsFile(self, key, val, env):
"""validator to check if Path is a file"""
if not os.path.isfile(val):
if os.path.isdir(val):
m = 'File path for option %s is a directory: %s'
else:
m = 'File path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def PathExists(self, key, val, env):
"""validator to check if Path exists"""
if not os.path.exists(val):
m = 'Path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def __call__(self, key, help, default, validator=None):
        # NB: searchfunc is currently undocumented and unsupported
"""
The input parameters describe a 'path list' option, thus they
are returned with the correct converter and validator appended. The
result is usable for input to opts.Add() .
The 'default' option specifies the default path to use if the
user does not specify an override with this option.
validator is a validator, see this file for examples
"""
if validator is None:
validator = self.PathExists
if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):
return (key, '%s ( /path/to/%s )' % (help, key[0]), default,
validator, None)
else:
return (key, '%s ( /path/to/%s )' % (help, key), default,
validator, None)
PathVariable = _PathVariableClass()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
texib/bitcoin-zoo
|
test/celeryconfig.py
|
Python
|
mit
| 525
| 0.00381
|
# List of modules to import when celery starts.
# CELERY_IMPORTS = ('libcloud_sandbox.tasks.code_execute', )
# Result store settings.
CELERY_RESULT_BACKEND = 'database'
CELERY_RESULT_DBURI = 'sqlite:///mydatabase.db'
# Broker settings.
BROKER_TRANSPORT = 'sqlalchemy'
BROKER_HOST = 'sqlite:///tasks.db'
BROKER_PORT = 5672
BROKER_VHOST = '/'
BROKER_USER = 'guest'
BROKER_PASSWORD = 'guest'
## Worker settings
CELERYD_CONCURRENCY = 1
CELERYD_TASK_TIME_LIMIT = 20
# CELERYD_LOG_FILE = 'celeryd.log'
CELERYD_LOG_LEVEL = 'INFO'
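# --- Hypothetical usage sketch (not part of the original config) -------------
# A worker module (assumed name: tasks.py) would load the settings above via
# Celery's config_from_object:
#
#     from celery import Celery
#     app = Celery('tasks')
#     app.config_from_object('celeryconfig')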
|
TinyOS-Camp/DDEA-DEV
|
Archive/[14_10_11] Dr_Jung_Update/ddea_cli.py
|
Python
|
gpl-2.0
| 688
| 0.002907
|
#!/adsc/DDEA_PROTO/bin/python
from df_data_analysis_ddea import ddea_analysis
from datetime import datetime
import traceback
import sys
if __name__ == '__main__':
try:
if 3 <= len(sys.argv):
###urls = open(sys.argv[1]).readlines()
start_time = sys.argv[1]
end_time = sys.argv[2]
stime = datetime.strptime(start_time, "%y-%m-%d")
etime = datetime.strptime(end_time, "%y-%m-%d")
ddea_analysis('', stime, etime)
else:
raise "Invalid Arguments"
except:
print traceback.print_exc()
print("Example: %s 14-01-01 14-02-02" % sys.argv[0])
raise SystemExit
|
MSusik/invenio
|
invenio/ext/template/extensions.py
|
Python
|
gpl-2.0
| 1,614
| 0.008055
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Custom `Jinja2` extensions."""
from jinja2 import nodes
from jinja2.ext import Extension
from flask import g
class LangExtension(Extension):
"""Ease transition from legacy templates using ``<lang>...</lang>``."""
tags = set(['lang'])
def parse(self, parser):
"""Parse the template."""
lineno = parser.stream.next().lineno
body = parser.parse_statements(['name:endlang'], drop_needle=True)
return nodes.CallBlock(self.call_method('_lang'),
[], [], body).set_lineno(lineno)
@staticmethod
def _lang(caller):
"""Return current language string using `
|
filter_languages`."""
from invenio.modules.formatter.engine import filter_languages
        return filter_languages('<lang>' + caller() + '</lang>', g.ln)
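# --- Hypothetical registration sketch (not part of the original module) ------
# Jinja2 extensions are enabled on the environment; a Flask app can do the same
# via app.jinja_env.add_extension():
#
#     from jinja2 import Environment
#     env = Environment(extensions=[LangExtension])
#     # templates may then wrap markup in {% lang %}<lang>...</lang>{% endlang %}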
|
tweemeterjop/thug
|
thug/DOM/W3C/HTML/HTMLFieldSetElement.py
|
Python
|
gpl-2.0
| 229
| 0
|
#!/usr/bin/env python
from .HTMLElement import HTMLElement
class HTMLFieldSetElement(HTMLElement):
def __init__(self, doc, tag):
HTMLElement.__init__(self, doc, tag)
@property
def form(self):
pass
|
fewu/gnuradio_drm
|
gr-drm/python/qa_drm_add_tailbits_vbvb.py
|
Python
|
gpl-3.0
| 1,583
| 0.025268
|
#!/usr/bin/env python
#
# Copyright 2012 Communications Engineering Lab (CEL) / KIT (Karlsruhe Institute of Technology)
# Author: Felix Wunsch
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
#
from gnuradio import gr, gr_unittest
import drm
#import drm_swig
class qa_add_tailbits_vbvb (gr_unittest.TestCase):
def setUp (self):
        self.tb = gr.top_block ()
self.src = gr.vector_source_b((1,1,0,1), True, 4)
self.head = gr.head(4,3)
self.add_tailbits = drm.add_tailbits_vbvb(4,2)
self.snk = gr.vector_sink_b(6)
self.tb.connect(self.src, self.head, self.add_tailbits, self.snk)
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
self.assertTupleEqual(self.snk.data(), (1,1,0,1,0,0,1,1,0,1,0,0,1,1,0,1,0,0))
if __name__ == '__main__':
gr_unittest.main ()
|
chromy/cmdtest
|
examples/echo.py
|
Python
|
mit
| 198
| 0.010101
|
from cmdtest import Program, assert_hook
echo = Program('echo')
@echo.test
def echo_string_should_output_string():
assert echo('foo').out == 'foo\n'
if __name__ == '__main__':
echo.run()
|
chokribr/invenio
|
invenio/modules/collections/models.py
|
Python
|
gpl-2.0
| 28,007
| 0.000036
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Database mod
|
els for collections."""
# General imports.
import re
from operator import itemgetter
from flask import g, url_for
from intbitset import intbitset
from invenio.base.globals import cfg
from invenio.base.i18n import _, gettext_set_language
from invenio.ext.sqlalchemy import db
from invenio.ext.sqlalchemy.utils import attribute_multi_dict_collection
from invenio.modules.formatter.registry import output_formats
from invenio.modules.search.models import Field, Fieldvalue
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm.collections import attribute_mapped_collection
from werkzeug.utils import cached_property
# Create your models here.
external_collection_mapper = attribute_multi_dict_collection(
creator=lambda k, v: CollectionExternalcollection(type=k,
externalcollection=v),
key_attr=lambda obj: obj.type,
val_attr=lambda obj: obj.externalcollection)
class Collection(db.Model):
"""Represent a Collection record."""
def __repr__(self):
"""Return class representation."""
return 'Collection <id: {0.id}, name: {0.name}, dbquery: {0.query}, ' \
'nbrecs: {0.nbrecs}>'.format(self)
def __unicode__(self):
suffix = ' ({0})'.format(_('default')) if self.id == 1 else ''
return u"{0.id}. {0.name}{1}".format(self, suffix)
def __str__(self):
return unicode(self).encode('utf-8')
__tablename__ = 'collection'
id = db.Column(db.MediumInteger(9, unsigned=True),
primary_key=True)
name = db.Column(db.String(255), unique=True, index=True,
nullable=False)
dbquery = db.Column(db.Text(20), nullable=True,
index=True)
@property
def nbrecs(self):
"""Number of records in the collection."""
from .cache import get_collection_nbrecs
return get_collection_nbrecs(self.name)
@property
def reclist(self):
"""Return hit set with record identifiers."""
from .cache import get_collection_reclist
return get_collection_reclist(self.name)
@property
def is_hosted(self):
"""Return True if collection is hosted elsewhere."""
return self.dbquery.startswith('hostedcollection:') if self.dbquery \
else False
_names = db.relationship(lambda: Collectionname,
backref='collection',
collection_class=attribute_mapped_collection(
'ln_type'),
cascade="all, delete, delete-orphan")
names = association_proxy(
'_names', 'value',
creator=lambda k, v: Collectionname(ln_type=k, value=v)
)
_boxes = db.relationship(lambda: Collectionboxname,
backref='collection',
collection_class=attribute_mapped_collection(
'ln_type'),
cascade="all, delete, delete-orphan")
boxes = association_proxy(
'_boxes', 'value',
creator=lambda k, v: Collectionboxname(ln_type=k, value=v)
)
_formatoptions = association_proxy('formats', 'format')
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def formatoptions(self):
"""Return list of format options."""
if len(self._formatoptions):
return [dict(f) for f in self._formatoptions]
else:
return [{'code': u'hb',
'name': _("HTML %(format)s", format=_("brief")),
'content_type': u'text/html'}]
formatoptions = property(formatoptions)
_examples_example = association_proxy('_examples', 'example')
@property
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def examples(self):
"""Return list of example queries."""
return list(self._examples_example)
@property
def name_ln(self):
from invenio.legacy.search_engine import get_coll_i18nname
return get_coll_i18nname(self.name,
getattr(g, 'ln', cfg['CFG_SITE_LANG']))
# Another possible implementation with cache memoize
# @cache.memoize
# try:
# return db.object_session(self).query(Collectionname).\
# with_parent(self).filter(db.and_(Collectionname.ln==g.ln,
# Collectionname.type=='ln')).first().value
# except Exception:
# return self.name
@property
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def portalboxes_ln(self):
return db.object_session(self).query(CollectionPortalbox).\
with_parent(self).\
options(db.joinedload_all(CollectionPortalbox.portalbox)).\
filter(CollectionPortalbox.ln == g.ln).\
order_by(db.desc(CollectionPortalbox.score)).all()
@property
def most_specific_dad(self):
results = sorted(
db.object_session(self).query(Collection).join(
Collection.sons
).filter(CollectionCollection.id_son == self.id).all(),
key=lambda c: c.nbrecs)
return results[0] if len(results) else None
@property
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def is_restricted(self):
"""Return ``True`` if the collection is restricted."""
from invenio.legacy.search_engine import collection_restricted_p
return collection_restricted_p(self.name)
@property
def type(self):
"""Return relation type."""
p = re.compile("\d+:.*")
if self.dbquery is not None and \
p.match(self.dbquery.lower()):
return 'r'
else:
return 'v'
_collection_children = db.relationship(
lambda: CollectionCollection,
collection_class=ordering_list('score'),
primaryjoin=lambda: Collection.id == CollectionCollection.id_dad,
foreign_keys=lambda: CollectionCollection.id_dad,
order_by=lambda: db.asc(CollectionCollection.score)
)
_collection_children_r = db.relationship(
lambda: CollectionCollection,
collection_class=ordering_list('score'),
primaryjoin=lambda: db.and_(
Collection.id == CollectionCollection.id_dad,
CollectionCollection.type == 'r'),
foreign_keys=lambda: CollectionCollection.id_dad,
order_by=lambda: db.asc(CollectionCollection.score)
)
_collection_children_v = db.relationship(
lambda: CollectionCollection,
collection_class=ordering_list('score'),
primaryjoin=lambda: db.and_(
Collection.id == CollectionCollection.id_dad,
CollectionCollection.type == 'v'),
foreign_keys=lambda: CollectionCollection.id_dad,
order_by=lambda: db.asc(CollectionCollection.score)
)
collection_parents = db.relationship(
lambda: CollectionCollection,
collection_class=ordering_list('score'),
primaryjoin=lambda: Collection.id == CollectionCollection.id_son,
foreign_keys=lambda: CollectionCollection.id_son,
order_by=lambda: db.asc(CollectionCollection.score)
)
collection_children = ass
|
ebertti/django-admin-easy
|
test_project/settings.py
|
Python
|
mit
| 2,792
| 0
|
"""
Django settings for sample_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hj6+-%d0cv@&x%bbb1_t%^+#lkuk2+-5@uci#zrt&xdw2ki&y*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'easy',
'test_app',
)
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.join(BASE_DIR, '/static')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
                'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
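# --- Illustrative aside (not part of the original settings file): a minimal
# sketch of how the LocMemCache backend configured in CACHES above is used.
# Run it in a context where these settings are loaded (e.g. `manage.py shell`),
# not inside settings.py itself:
#
#     from django.core.cache import cache
#     cache.set('greeting', 'hello', timeout=30)   # kept in local memory for 30 seconds
#     assert cache.get('greeting') == 'hello'
#     assert cache.get('missing', 'fallback') == 'fallback'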
|
brad-kaiser/spark
|
python/pyspark/sql/catalog.py
|
Python
|
apache-2.0
| 11,982
| 0.00192
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from collections import namedtuple
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.udf import UserDefinedFunction
from pyspark.sql.types import IntegerType, StringType, StructType
Database = namedtuple("Database", "name description locationUri")
Table = namedtuple("Table", "name database description tableType isTemporary")
Column = namedtuple("Column", "name description dataType nullable isPartition isBucket")
Function = namedtuple("Function", "name description className isTemporary")
class Catalog(object):
"""User-facing catalog API, accessible through `SparkSession.catalog`.
This is a thin wrapper around its Scala implementation org.apache.spark.sql.catalog.Catalog.
"""
def __init__(self, sparkSession):
"""Create a new Catalog that wraps the underlying JVM object."""
self._sparkSession = sparkSession
self._jsparkSession = sparkSession._jsparkSession
self._jcatalog = sparkSession._jsparkSession.catalog()
@ignore_unicode_prefix
@since(2.0)
def currentDatabase(self):
"""Returns the current default database in this session."""
return self._jcatalog.currentDatabase()
@ignore_unicode_prefix
@since(2.0)
def setCurrentDatabase(self, dbName):
"""Sets the current default database in this session."""
return self._jcatalog.setCurrentDatabase(dbName)
@ignore_unicode_prefix
@since(2.0)
def listDatabases(self):
"""Returns a list of databases available across all sessions."""
iter = self._jcatalog.listDatabases().toLocalIterator()
databases = []
while iter.hasNext():
jdb = iter.next()
databases.append(Database(
name=jdb.name(),
description=jdb.description(),
locationUri=jdb.locationUri()))
return databases
@ignore_unicode_prefix
@since(2.0)
def listTables(self, dbName=None):
"""Returns a list of tables/views in the specified database.
If no database is specified, the current database is used.
This includes all temporary views.
"""
if dbName is None:
dbName = self.currentDatabase()
iter = self._jcatalog.listTables(dbName).toLocalIterator()
tables = []
while iter.hasNext():
jtable = iter.next()
tables.append(Table(
name=jtable.name(),
database=jtable.database(),
description=jtable.description(),
tableType=jtable.tableType(),
isTemporary=jtable.isTemporary()))
return tables
@ignore_unicode_prefix
@since(2.0)
def listFunctions(self, dbName=None):
"""Returns a list of functions registered in the specified database.
If no database is specified, the current database is used.
This includes all temporary functions.
"""
if dbName is None:
dbName = self.currentDatabase()
iter = self._jcatalog.listFunctions(dbName).toLocalIterator()
functions = []
while iter.hasNext():
jfunction = iter.next()
functions.append(Function(
name=jfunction.name(),
description=jfunction.description(),
className=jfunction.className(),
isTemporary=jfunction.isTemporary()))
return functions
@ignore_unicode_prefix
@since(2.0)
def listColumns(self, tableName, dbName=None):
"""Returns a list of columns for the given table/view in the specified database.
If no database is specified, the current database is used.
Note: the order of arguments here is different from that of its JVM counterpart
because Python does not support method overloading.
"""
if dbName is None:
dbName = self.currentDatabase()
iter = self._jcatalog.listColumns(dbName, tableName).toLocalIterator()
columns = []
while iter.hasNext():
jcolumn = iter.next()
columns.append(Column(
name=jcolumn.name(),
description=jcolumn.description(),
dataType=jcolumn.dataType(),
nullable=jcolumn.nullable(),
isPartition=jcolumn.isPartition(),
isBucket=jcolumn.isBucket()))
return columns
@since(2.0)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
        Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
warnings.warn(
"createExternalTable is deprecated since Spark 2.2, please use createTable instead.",
DeprecationWarning)
return self.createTable(tableName, path, source, schema, **options)
@since(2.2)
    def createTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used. When ``path`` is specified, an external table is
created from the data at the given path. Otherwise a managed table is created.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created table.
:return: :class:`DataFrame`
"""
if path is not None:
options["path"] = path
if source is None:
source = self._sparkSession.conf.get(
"spark.sql.sources.default", "org.apache.spark.sql.parquet")
if schema is None:
df = self._jcatalog.createTable(tableName, source, options)
else:
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType")
scala_datatype = self._jsparkSession.parseDataType(schema.json())
df = self._jcatalog.createTable(tableName, source, scala_datatype, options)
return DataFrame(df, self._sparkSession._wrapped)
@since(2.0)
def dropTempView(self, viewName):
"""Drops the local temporary view with the given view name in the catalog.
If the view has been cached before, then it will also be uncached.
Returns true if this view is dropped successfully, false otherwise.
Note that, the return type of this method was None in Spark 2.0, but changed to Boolean
in Spark 2.1.
>>> spark.createDataFrame([(1, 1)]).createTempView("my_table")
>>> spark.table("my_table").collect()
[Row(_1=1, _2=1)]
>>> spark.catalog.dropTem
|
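A brief usage sketch of the Catalog wrapper shown above (illustrative only and not part of the original file; the SparkSession setup and the 'people' table name are assumptions for the example):

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("catalog-demo").getOrCreate()
print(spark.catalog.currentDatabase())              # e.g. 'default'
for table in spark.catalog.listTables():            # Table namedtuples, as defined above
    print(table.name, table.tableType, table.isTemporary)
for column in spark.catalog.listColumns("people"):  # assumes a 'people' table exists
    print(column.name, column.dataType, column.nullable)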
OCA/reporting-engine
|
report_py3o_fusion_server/tests/__init__.py
|
Python
|
agpl-3.0
| 158
| 0
|
# Copyright 2017 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_report_py3o_fusion_server
|
nlamirault/python-freeboxclient
|
freeboxclient/client.py
|
Python
|
apache-2.0
| 3,447
| 0.00029
|
#
# Copyright 2013 Nicolas Lamirault <nicolas.lamirault@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from cliff import command
logger = logging.getLogger(__name__)
class FreeboxCommand(command.Command):
"""Default Freebox command."""
pass
class FreeboxApiVersion(FreeboxCommand):
"""Retrieve the Freebox OS api version."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] API_Version")
api_version = self.app.freebox_client.version()
#print "Result: %s" % api_version
logger.info('[FreeboxOS] %s\n' % api_version['api_version'])
class FreeboxLogin(FreeboxCommand):
"""Login to the Freebox OS."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Login")
self.app.freebox_client.login()
# self.app.stdout.write('FreeboxOS: %s\n' %
# self.app.freebox_client)
logger.info('[FreeboxOS] Login response: %s' % self.app.freebox_client)
class FreeboxAuthorize(FreeboxCommand):
"""Request authorization for this application."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Authorization request")
self.app.freebox_client.ask_authorization()
class FreeboxCheckAuthorization(FreeboxCommand):
"""Request informations about authorization for this application."""
    def take_action(self, parsed_args):
logger.info("[FreeboxOS] Check Authorization ")
self.app.freebox_client.check_authorization()
class FreeboxOpenSession(FreeboxCommand):
"""Open a new session to the FreeboxOS."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Open sesion")
self.app.freebox_client.
|
open_session()
class FreeboxCloseSession(FreeboxCommand):
"""Close the current session to the FreeboxOS."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Close sesion")
self.app.freebox_client.close_session()
class FreeboxWifiStatus(FreeboxCommand):
"""Retrieve the WIFI status."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Wifi status")
wifi_status = self.app.freebox_client.get_wifi_status()
logger.info("[FreeboxOS] Wifi status:\n %s" % wifi_status)
class FreeboxWifiConfiguration(FreeboxCommand):
"""Retrieve the current WIFI configuration."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Wifi configuration")
wifi_config = self.app.freebox_client.get_wifi_config()
logger.info("[FreeboxOS] Wifi configuration:\n %s" % wifi_config)
class FreeboxWifiStations(FreeboxCommand):
"""Retrieve a list of wifi stations."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Wifi stations")
wifi_stations = self.app.freebox_client.get_wifi_stations()
logger.info("[FreefoxOS] Wifi stations:\n %s" % wifi_stations)
|
Bartzi/LabShare
|
labshare/migrations/0013_initial_groups.py
|
Python
|
gpl-2.0
| 566
| 0.001767
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import Group
from django.db import migrations
def initial_data(apps, schema_editor):
    staff = Group.objects.create(name="Staff")
staff.save()
def delete_staff_group(apps, schema_editor):
staff = Group.objects.get(name="Staff")
staff.delete()
class Migration(migrations.Migration):
dependencies = [
('labshare', '0012_auto_20161026_1453'),
]
operations = [
        migrations.RunPython(initial_data, delete_staff_group),
]
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/Cura/cura_sf/skeinforge_application/skeinforge_plugins/craft_plugins/raft.py
|
Python
|
agpl-3.0
| 56,776
| 0.02251
|
"""
This page is in the table of contents.
Raft is a plugin to create a raft, elevate the nozzle and set the temperature. A raft is a flat base structure on top of which your object is built, and it has a few different purposes. It fills irregularities like scratches and pits in your print bed and gives you a nice base parallel to the printhead's movement. It also glues your object to the bed to prevent warping in bigger objects. The raft's base layer performs these tricks while the sparser interface layer(s) help you remove the object from the raft after printing. It is based on Nophead's reusable raft, which has a base layer running one way, and a couple of perpendicular layers above. Each set of layers can be set to a different temperature. There is the option of having the extruder orbit the raft for a while, so the heater barrel has time to reach a different temperature, without ooze accumulating around the nozzle.
The raft manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Raft
The important values for the raft settings are the temperatures of the raft, the first layer and the next layers. These will be different for each material. The default settings for ABS, HDPE, PCL & PLA are extrapolated from Nophead's experiments.
You don't necessarily need a raft, and small objects especially will print fine on a flat bed without one; sometimes it is even better to print directly on the bed when you need a watertight base. If you want to only set the temperature, only create support material, or only elevate the nozzle without creating a raft, set the Base Layers and Interface Layers to zero.
<gallery perRow="1">
Image:Raft.jpg|Raft
</gallery>
Example of a raft on the left with the interface layers partially removed exposing the base layer. Notice that the first line of the base is rarely printed well because of the startup time of the extruder. On the right you see an object with its raft still attached.
The Raft panel has some extra settings; it probably made sense to have them there, but they do not have that much to do with the actual raft. First are the support material settings. Since close to all RepRap-style printers have no second extruder for support material, Skeinforge offers the option to print support structures with the same material set at a different speed and temperature. The idea is that the support sticks less to the actual object when it is extruded around the minimum possible working temperature. This results in a temperature change EVERY layer, so build time will increase significantly.
Allan Ecker, aka The Masked Retriever, has written two quicktips for raft, which follow below.
"Skeinforge Quicktip: The Raft, Part 1" at:
http://blog.thingiverse.com/2009/07/14/skeinforge-quicktip-the-raft-part-1/
"Skeinforge Quicktip: The Raft, P
|
art II" at:
http://blog.thingiverse.com/2009/08/04/skeinforge-quicktip-the-raft-part-ii/
Nophead has written about rafts on his blog:
http://hydraraptor.blogspot.com/2009/07/thoughts-on-rafts.html
More pictures of rafting in action are available from the Metalab blog at:
http://reprap.soup.io/?search=rafting
==Operation==
Default: On
When it is on, the functions described below will work; when it is off, nothing will be done, so no temperatures will be set and the nozzle will not be lifted.
==Settings==
===Add Raft, Elevate Nozzle, Orbit===
Default: On
When selected, the script will also create a raft, elevate the nozzle, orbit and set the altitude of the bottom of the raft. It also turns on support generation.
===Base===
Base layer is the part of the raft that touches the bed.
====Base Feed Rate Multiplier====
Default is one.
Defines the base feed rate multiplier. The greater the 'Base Feed Rate Multiplier', the thinner the base, the lower the 'Base Feed Rate Multiplier', the thicker the base.
====Base Flow Rate Multiplier====
Default is one.
Defines the base flow rate multiplier. The greater the 'Base Flow Rate Multiplier', the thicker the base, the lower the 'Base Flow Rate Multiplier', the thinner the base.
====Base Infill Density====
Default is 0.5.
Defines the infill density ratio of the base of the raft.
====Base Layer Height over Layer Thickness====
Default is two.
Defines the ratio of the height & width of the base layer compared to the height and width of the object infill. The feed rate will be slower for raft layers which have thicker extrusions than the object infill.
====Base Layers====
Default is one.
Defines the number of base layers.
====Base Nozzle Lift over Base Layer Thickness====
Default is 0.4.
Defines the amount the nozzle is above the center of the base extrusion divided by the base layer thickness.
===Initial Circling===
Default is off.
When selected, the extruder will initially circle around until it reaches operating temperature.
===Infill Overhang over Extrusion Width===
Default is 0.05.
Defines the ratio of the infill overhang over the extrusion width of the raft.
===Interface===
====Interface Feed Rate Multiplier====
Default is one.
Defines the interface feed rate multiplier. The greater the 'Interface Feed Rate Multiplier', the thinner the interface, the lower the 'Interface Feed Rate Multiplier', the thicker the interface.
====Interface Flow Rate Multiplier====
Default is one.
Defines the interface flow rate multiplier. The greater the 'Interface Flow Rate Multiplier', the thicker the interface, the lower the 'Interface Flow Rate Multiplier', the thinner the interface.
====Interface Infill Density====
Default is 0.5.
Defines the infill density ratio of the interface of the raft.
====Interface Layer Thickness over Extrusion Height====
Default is one.
Defines the ratio of the height & width of the interface layer compared to the height and width of the object infill. The feed rate will be slower for raft layers which have thicker extrusions than the object infill.
====Interface Layers====
Default is two.
Defines the number of interface layers to print.
====Interface Nozzle Lift over Interface Layer Thickness====
Default is 0.45.
Defines the amount the nozzle is above the center of the interface extrusion divided by the interface layer thickness.
===Name of Alteration Files===
If support material is generated, raft looks for alteration files in the alterations folder in the .skeinforge folder in the home directory. Raft does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder.
====Name of Support End File====
Default is support_end.gcode.
If support material is generated and if there is a file with the name of the "Name of Support End File" setting, it will be added to the end of the support gcode.
====Name of Support Start File====
If support material is generated and if there is a file with the name of the "Name of Support Start File" setting, it will be added to the start of the support gcode.
===Operating Nozzle Lift over Layer Thickness===
Default is 0.5.
Defines the amount the nozzle is above the center of the operating extrusion divided by the layer height.
===Raft Size===
The raft fills a rectangle whose base size is the rectangle around the bottom layer of the object expanded on each side by the 'Raft Margin' plus the 'Raft Additional Margin over Length (%)' percentage times the length of the side.
====Raft Additional Margin over Length====
Default is 1 percent.
====Raft Margin====
Default is three millimeters.
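As an illustrative calculation (not from the original manual): with the defaults above, an object whose bottom layer is 100 millimeters long along one side is expanded by 3 + 0.01 * 100 = 4 millimeters on each of those two sides, so the raft rectangle is 108 millimeters long in that direction.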
===Support===
Good articles on support material are at:
http://davedurant.wordpress.com/2010/07/31/skeinforge-support-part-1/
http://davedurant.wordpress.com/2010/07/31/skeinforge-support-part-2/
====Support Cross Hatch====
Default is off.
When selected, the support material will be cross hatched. Cross hatching the support makes it stronger and harder to remove, which is why the default is off.
====Support Flow Rate over Operating Flow Rate====
Default: 0.9.
Defines the ratio of the flow rate when the support is extruded over th
|
beobal/cassandra-dtest
|
upgrade_internal_auth_test.py
|
Python
|
apache-2.0
| 9,980
| 0.002405
|
import time
import pytest
import logging
from cassandra import Unauthorized
from ccmlib.common import is_win
from ccmlib.node import Node
from dtest_setup_overrides import DTestSetupOverrides
from dtest import Tester
from tools.assertions import assert_all, assert_invalid
from tools.misc import ImmutableMapping
since = pytest.mark.since
logger = logging.getLogger(__name__)
@pytest.mark.upgrade_test
@since('2.2')
class TestAuthUpgrade(Tester):
@pytest.fixture(scope='function', autouse=True)
def fixture_dtest_setup_overrides(self, dtest_config):
dtest_setup_overrides = DTestSetupOverrides()
dtest_setup_overrides.cluster_options = ImmutableMapping({'authenticator': 'PasswordAuthenticator',
'authorizer': 'CassandraAuthorizer'})
return dtest_setup_overrides
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.ignore_log_patterns = (
# This one occurs if we do a non-rolling upgrade, the node
# it's trying to send the migration to hasn't started yet,
# and when it does, it gets replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
)
def test_upgrade_to_22(self):
self.do_upgrade_with_internal_auth("github:apache/cassandra-2.2")
@since('3.0')
@pytest.mark.no_offheap_memtables
def test_upgrade_to_30(self):
self.do_upgrade_with_internal_auth("github:apache/cassandra-3.0")
@since('2.2', max_version='3.X')
def test_upgrade_legacy_table(self):
"""
Upgrade with bringing up the legacy tables after the newer nodes (without legacy tables)
were started.
@jira_ticket CASSANDRA-12813
"""
cluster = self.cluster
# Forcing cluster version on purpose
cluster.set_install_dir(version="2.1.16")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
# Wait for default user to get created on one of the nodes
time.sleep(15)
# Upgrade to current version
for node in [node1, node2, node3]:
node.drain()
node.watch_log_for("DRAINED")
node.stop(gently=True)
self.set_node_to_current_version(node)
cluster.start()
# Make sure the system_auth table will get replicated to the node that we're going to replace
session = self.patient_cql_connection(node1, user='cassandra', password='cassandra')
session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 };")
cluster.repair()
cluster.stop()
# Replace the node
cluster.seeds.remove(node1)
cluster.remove(node1)
replacement_address = node1.address()
replacement_node = Node('replacement', cluster=self.cluster, auto_bootstrap=True,
thrift_interface=(replacement_address, 9160),
storage_interface=(replacement_address, 7000),
jmx_port='7400', remote_debug_port='0', initial_token=None,
binary_interface=(replacement_address, 9042))
self.set_node_to_current_version(replacement_node)
cluster.add(replacement_node, True)
replacement_node.start(wait_for_binary_proto=True)
node2.start(wait_for_binary_proto=True)
node3.start(wait_for_binary_proto=True)
replacement_node.watch_log_for('Initializing system_auth.credentials')
replacement_node.watch_log_for('Initializing system_auth.permissions')
replacement_node.watch_log_for('Initializing system_auth.users')
cluster.repair()
replacement_node.watch_log_for('Repair command')
# Should succeed. Will throw an NPE on pre-12813 code.
self.patient_cql_connection(replacement_node, user='cassandra', password='cassandra')
def do_upgrade_with_internal_auth(self, target_version):
"""
Tests upgrade between 2.1->2.2 & 2.1->3.0 as the schema and apis around authn/authz changed
@jira_ticket CASSANDRA-7653
"""
cluster = self.cluster
# Forcing cluster version on purpose
cluster.set_install_dir(version="github:apache/cassandra-2.1")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
# wait for default superuser creation
# The log message
# node.watch_log_for('Created default superuser')
# will only appear on one of the three nodes, and we don't know
# which ahead of time. Grepping all three in parallel is unpleasant.
# See auth_test and auth_roles test for instances of this as well.
# Should be fixed by C*-6177
time.sleep(15)
session = self.patient_cql_connection(node1, user='cassandra', password='cassandra')
session.execute("CREATE USER klaus WITH PASSWORD '12345' SUPERUSER")
session.execute("CREATE USER michael WITH PASSWORD '54321' NOSUPERUSER")
session.execute("CREATE KEYSPACE ks WITH replication = {'class':'SimpleStrategy', 'replication_factor':1}")
session.execute("CREATE TABLE ks.cf1 (id int primary key, val int)")
session.execute("CREATE TABLE ks.cf2 (id int primary key, val int)")
session.execute("GRANT MODIFY ON ks.cf1 TO michael")
session.execute("GRANT SELECT ON ks.cf2 TO michael")
self.check_permissions(node1, False)
session.cluster.shutdown()
# upgrade node1 to 2.2
self.upgrade_to_version(target_version, node1)
# run the permissions checking queries on the upgraded node
# this will be using the legacy tables as the conversion didn't complete
# but the output format should be updated on the upgraded node
self.check_permissions(node1, True)
# and check on those still on the old version
self.check_permissions(node2, False)
self.check_permissions(node3, False)
# now upgrade the remaining nodes
self.upgrade_to_version(target_version, node2)
self.upgrade_to_version(target_version, node3)
self.check_permissions(node2, True)
self.check_permissions(node3, True)
# we should now be able to drop the old auth tables
session = self.patient_cql_connection(node1, user='cassandra', password='cassandra')
session.execute('DROP TABLE system_auth.users', timeout=60)
session.execute('DROP TABLE system_auth.credentials', timeout=60)
session.execute('DROP TABLE system_auth.permissions', timeout=60)
# and we should still be able to authenticate and check authorization
self.check_permissions(node1, True)
logger.debug('Test completed successfully')
def check_permissions(self, node, upgraded):
# use an exclusive connection to ensure we only talk to the specified node
klaus = self.patient_exclusive_cql_connection(node, user='klaus', password='12345', timeout=20)
# klaus is a superuser, so should be able to list all permissions
# the output of LIST PERMISSIONS changes slightly with #7653 adding
# a new role column to results, so we need to tailor our check
# based on whether the node has been upgraded or not
if not upgraded:
assert_all(klaus,
'LIST ALL PERMISSIONS',
[['michael', '<table ks.cf1>', 'MODIFY'],
['michael', '<table ks.cf2>', 'SELECT']],
timeout=60)
else:
assert_all(klaus,
'LIST ALL PERMISSIONS',
[['michael', 'michael', '<table ks.cf1>', 'MODIFY'],
['michael', 'michael', '<table ks.cf2>', 'SELECT']],
timeout=60)
klaus.cluster.shutdown()
michael = self.patient_exclusive_cql_connection(node, user='michael', password='54321')
michael.execute('INSERT INT
|
mediawiki-utilities/python-mediawiki-utilities
|
doc/conf.py
|
Python
|
mit
| 8,467
| 0.006023
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mediawiki-utilities documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 10 17:31:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
import mw
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'mediawiki-utilities'
copyright = '2014, Aaron Halfaker'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mw.__version__
# The full version, including alpha/beta/rc tags.
release = mw.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mediawiki-utilitiesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'mediawiki-utilities.tex', 'mediawiki-utilities Documentation',
'Aaron Halfaker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mediawiki-utilities', 'mediawiki-utilities Documentation',
['Aaron Halfaker'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mediawiki-utilities', 'mediawiki-utilities Documentation',
'Aaron Halfaker', 'mediawiki-utilities', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinf
|
twitter-forks/bazel
|
tools/ctexplain/types_test.py
|
Python
|
apache-2.0
| 2,530
| 0.001581
|
# Lint as: python3
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for types.py."""
import unittest
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from frozendict import frozendict
from tools.ctexplain.types import Configuration
class TypesTest(unittest.TestCase):
def testConfigurationIsHashable(self):
options = frozendict({'o1': frozendict({'k1': 'v1'})})
c = Configuration(fragments=('F1'), options=options)
some_dict = {}
some_dict[c] = 4
def testConfigurationHashAccuracy(self):
d = {}
options1 = frozendict({'o1': frozendict({'k1': 'v1'})})
d[Configuration(fragments=('F1'), options=options1)] = 4
self.assertEqual(len(d), 1)
options2 = frozendict({'o1': frozendict({'k1': 'v1'})})
    d[Configuration(fragments=('F1'), options=options2)] = 4
self.assertEqual(len(d), 1)
options3 = frozendict({'o1': frozendict({'k1': 'v1'})})
d[Configuration(fragments=('F2'), options=options3)] = 4
self.assertEqual(len(d), 2)
options4 = frozendict({'o2': frozendict({'k1': 'v1'})})
d[Configuration(fragments=('F2'), options=options4)] = 4
self.assertEqual(len(d), 3)
    options5 = frozendict({'o2': frozendict({'k2': 'v1'})})
d[Configuration(fragments=('F2'), options=options5)] = 4
self.assertEqual(len(d), 4)
options6 = frozendict({'o2': frozendict({'k2': 'v2'})})
d[Configuration(fragments=('F2'), options=options6)] = 4
self.assertEqual(len(d), 5)
def testConfigurationEquality(self):
c1 = Configuration(fragments=('F1'), options={'o1': {'k1': 'v1'}})
c2 = Configuration(fragments=('F1'), options={'o1': {'k1': 'v1'}})
c3 = Configuration(fragments=('F2'), options={'o1': {'k1': 'v1'}})
c4 = Configuration(fragments=('F1'), options={'o2': {'k2': 'v2'}})
self.assertEqual(c1, c2)
self.assertNotEqual(c1, c3)
self.assertNotEqual(c1, c4)
self.assertNotEqual(c3, c4)
if __name__ == '__main__':
unittest.main()
|
ego008/ijd8
|
sae/setting.py
|
Python
|
mit
| 1,814
| 0.024441
|
# -*- coding: utf-8 -*-
import sae.const
DEBUG = False
SITE_TITLE = u"博客标题"
SITE_SUB_TITLE = u"博客副标题"
SITE_KEYWORDS = u"博客关键字"
SITE_DECRIPTION = u"博客描述"
AUTHOR_NAME = u"博客作者" #显示在RSS订阅里面
#CONACT_MAIL = "xxx@gmail.com" #暂未用到
THEMES = ['octopress','admin']
LINK_BROLL = [
{'text': u"爱简单吧", 'url': "http://www.ijd8.com", 'title': u"ijd8官方博客"},
{'text': u"YouBBS", 'url': "http://youbbs.sinaapp.com", 'title': u"ijd8支持论坛"},
]
MAJOR_DOMAIN = 'www.yourdomain.com' #primary domain
##MySQL database connection info
MYSQL_DB = sae.const.MYSQL_DB
MYSQL_USER = sae.const.MYSQL_USER
MYSQL_PASS = sae.const.MYSQL_PASS
MYSQL_HOST = "%s:%s" % (sae.const.MYSQL_HOST_S, sae.const.MYSQL_PORT)
MYSQL_HOST_M = "%s:%s" % (sae.const.MYSQL_HOST, sae.const.MYSQL_PORT)
JQUERY = "http://lib.sinaapp.com/js/jquery/1.9.1/jquery-1.9.1.min.js"
COOKIE_SECRET = "11orTzKXQAsaYdkL5gEtGeJJFuYh7EQnp2XdTP1o/Vo="
LANGUAGE = 'zh-CN'
EACH_PAGE_POST_NUM = 10 #posts shown per page
RECENT_POST_NUM = 10 #recent posts shown in the sidebar
RELATED_NUM = 10 #number of related posts shown
SIDER_TAG_NUM = 100 #tags shown in the sidebar
SIDER_CAT_NUM = 100 #categories shown in the sidebar
SHORTEN_CONTENT_WORDS = 150 #characters each post is truncated to in list views
DESCRIPTION_CUT_WORDS = 100 #characters shown in the meta description
FEED_NUM = 10 #posts included in the feed output
#######Attachment storage below: choose either SAE Storage or Qiniu (free quota available), pick only one
## 1) SAE Storage must first be enabled in the SAE control panel
BUCKET = "" #Domain Name, e.g. upload. Leave empty if unused or if using Qiniu
## 2) Qiniu: registering gives 10G of permanent space plus 10G of traffic per month, sign up at http://t.cn/z8h5lsg
QN_AK = "" #Qiniu ACCESS_KEY
QN_SK = "" #Qiniu SECRET_KEY
QN_BUCKET = "" #bucket name, e.g. upload
|
camradal/ansible
|
lib/ansible/modules/cloud/amazon/lambda_alias.py
|
Python
|
gpl-3.0
| 12,318
| 0.002598
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: lambda_alias
short_description: Creates, updates or deletes AWS Lambda function aliases.
description:
  - This module allows the management of AWS Lambda function aliases via the Ansible
framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
itself and M(lambda_event) to manage event source mappings.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
options:
function_name:
description:
- The name of the function alias.
required: true
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
name:
description:
- Name of the function alias.
required: true
aliases: ['alias_name']
description:
description:
- A short, user-defined function alias description.
required: false
version:
description:
- Version associated with the Lambda function alias.
A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
required: false
aliases: ['function_version']
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Simple example to create a lambda function and publish a version
- hosts: localhost
gather_facts: no
vars:
state: present
project_folder: /path/to/deployment/package
deployment_package: lambda.zip
account: 123456789012
production_version: 5
tasks:
- name: AWS Lambda Function
lambda:
state: "{{ state | default('present') }}"
name: myLambdaFunction
publish: True
description: lambda function description
code_s3_bucket: package-bucket
code_s3_key: "lambda/{{ deployment_package }}"
local_path: "{{ project_folder }}/{{ deployment_package }}"
runtime: python2.7
timeout: 5
handler: lambda.handler
memory_size: 128
role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
- name: show results
debug:
var: lambda_facts
# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
- name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Dev
description: Development is $LATEST version
# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
- name: "alias 'QA' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: QA
version: "{{ lambda_facts.Version }}"
description: "QA is version {{ lambda_facts.Version }}"
when: lambda_facts.Version != "$LATEST"
# The Prod alias will have a fixed version based on a variable
- name: "alias 'Prod
|
' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Prod
version: "{{ production_version }}"
description: "Production is version {{ production_version }}"
'''
RETURN = '''
---
alias_arn:
description: Full ARN of the function, including the alias
returned: success
type: string
sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
description:
description: A short description of the alias
returned: success
type: string
sample: The development stage for my hot new app
function_version:
description: The qualifier that the alias refers to
returned: success
type: string
sample: $LATEST
name:
description: The name of the alias assigned
returned: success
type: string
sample: dev
'''
class AWSConnection:
"""
Create the connection object and client objects as required.
"""
def __init__(self, ansible_obj, resources, boto3=True):
try:
self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)
self.resource_client = dict()
if not resources:
resources = ['lambda']
resources.append('iam')
for resource in resources:
aws_connect_kwargs.update(dict(region=self.region,
endpoint=self.endpoint,
conn_type='client',
resource=resource
))
self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
# if region is not provided, then get default profile/session region
if not self.region:
self.region = self.resource_client['lambda'].meta.region_name
except (ClientError, ParamValidationError, MissingParametersError) as e:
ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
try:
self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
except (ClientError, ValueError, KeyError, IndexError):
self.account_id = ''
def client(self, resource='lambda'):
return self.resource_client[resource]
def pc(key):
"""
    Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
:param key:
:return:
"""
return "".join([token.capitalize() for token in key.split('_')])
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict()
for param in module_params:
module_param = module.params.get(param, None)
if module_param:
api_params[pc(param)] = module_param
return api_params
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module: Ansible module reference
:param aws: AWS client connection
:return:
"""
function_name = module.params['function_name']
# validate function name
if not re.search('^[\w\-:]+$', function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
)
if len(function_name) > 64:
module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
# if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
if module.params['function_version'] == 0:
module.params['function_version'] = '$LATEST'
else:
module.params['function_version'] = str(module.params['function_version'])
return
def get_lambda_alias(module, aws):
"""
Returns the lambda function alias if it exists.
:param module: Ansible module reference
:param aws:
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/ipware/utils.py
|
Python
|
agpl-3.0
| 794
| 0
|
import socket
def is_valid_ipv4(ip_str):
"""
Check the validity of an IPv4 address
"""
try:
socket.inet_pton(socket.AF_INET, ip_str)
except AttributeError:
try: # Fall-back on legacy API or False
socket.inet_aton(ip_str)
except (AttributeError, socket.error):
return False
return ip_str.count('.') == 3
except socket.error:
return False
return True
def is_valid_ipv6(ip_str):
"""
Check the validity of an IPv6 address
"""
try:
socket.inet_pton(socket.AF_INET6, ip_str)
except socket.error:
        return False
return True
def is_valid_ip(ip_str):
"""
Check the validity of an IP address
"""
return is_valid_ipv4(ip_str) or is_valid_ipv6(ip_str)
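# --- Illustrative aside (not part of the original module): example calls
# showing the expected behaviour of the validators above.
assert is_valid_ipv4('192.168.0.1')
assert not is_valid_ipv4('999.1.1.1')   # octet out of range
assert is_valid_ipv6('::1')
assert not is_valid_ip('not-an-ip')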
|
onepercentclub/onepercentclub-site
|
apps/homepage/serializers.py
|
Python
|
bsd-3-clause
| 794
| 0.001259
|
from bluebottle.bb_projects.serializers import ProjectPreviewSerializer
from bluebottle.quotes.serializers import QuoteSerializer
from bluebottle.slides.serializers import SlideSerializer
from apps.campaigns.serializers import CampaignSerializer
from bluebottle.bb_fundraisers.serializers import BaseFundRaiserSerializer
from apps.statistics.serializers import StatisticSerializer
from rest_framework import serializers
class HomePageSerializer(serializers.Serializer):
quotes = QuoteSerializer(source='quotes')
slides = SlideSerializer(source='slides')
impact = StatisticSerializer(source='stats')
projects = ProjectPreviewSerializer(source='projects')
campaign = CampaignSerializer(source='campaign')
fundraisers = BaseFundRaiserSerializer(source='fundraisers')
|
ntt-sic/taskflow
|
taskflow/tests/unit/test_unordered_flow.py
|
Python
|
apache-2.0
| 2,294
| 0
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import taskflow.engines
from taskflow.patterns import unordered_flow as uf
from taskflow import task
from taskflow import test
from taskflow.tests import utils
class UnorderedFlowTest(test.TestCase):
def _make_engine(self, flow):
return taskflow.engines.load(flow, store={'context': {}})
def test_result_access(self):
class DoApply(task.Task):
default_provides = ('a', 'b')
def execute(self):
return [1, 2]
wf = uf.Flow("the-test-action")
wf.add(DoApply())
e = self._make_engine(wf)
e.run()
data = e.storage.fetch_all()
self.assertIn('a', data)
self.assertIn('b', data)
self.assertEquals(2, data['b'])
self.assertEquals(1, data['a'])
def test_reverting_flow(self):
wf = uf.Flow("the-test-action")
wf.add(utils.make_reverting_task('1'))
wf.add(utils.make_reverting_task('2', blowup=True))
e = self._make_engine(wf)
self.assertRaises(Exception, e.run)
def test_functor_flow(self):
class DoApply1(task.Task):
default_provides = ('a', 'b', 'c')
def execute(self, context):
context['1'] = True
return ['a', 'b', 'c']
class DoApply2(task.Task):
def execute(self, context):
context['2'] = True
wf = uf.Flow("the-test-action")
wf.add(DoApply1())
wf.add(DoApply2())
        e = self._make_engine(wf)
e.run()
self.assertEquals(2, len(e.storage.fetch('context')))
|
fusionbox/satchless
|
satchless/order/app.py
|
Python
|
bsd-3-clause
| 2,122
| 0.001414
|
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from ..core.app import SatchlessApp
from . import models
class OrderApp(SatchlessApp):
app_name = 'order'
namespace = 'order'
order_model = models.Order
order_details_templates = [
'satchless/order/view.html',
        'satchless/order/%(order_model)s/view.html'
]
order_list_templates = [
'satchless/order/my_orders.html',
'satchless/order/%(order_model)s/my_orders.html'
]
@method_decorator(login_required)
def index(self, request):
orders = self.order_model.objects.filter(user=request.user)
context = self.get_context_data(request, orders=orders)
format_data = {
'order_model': self.order_model._meta.model_name
}
templates = [p % format_data for p in self.order_list_templates]
return TemplateResponse(request, templates, context)
def details(self, request, order_token):
order = self.get_order(request, order_token=order_token)
context = self.get_context_data(request, order=order)
format_data = {
'order_model': order._meta.model_name
}
templates = [p % format_data for p in self.order_details_templates]
return TemplateResponse(request, templates, context)
def get_order(self, request, order_token):
if request.user.is_authenticated():
orders = self.order_model.objects.filter(user=request.user)
else:
orders = self.order_model.objects.filter(user=None)
order = get_object_or_404(orders, token=order_token)
return order
def get_urls(self, prefix=None):
prefix = prefix or self.app_name
return patterns('',
url(r'^$', self.index, name='index'),
url(r'^(?P<order_token>[0-9a-zA-Z]+)/$', self.details,
name='details'),
)
order_app = OrderApp()
|
tbabej/freeipa
|
ipaserver/install/installutils.py
|
Python
|
gpl-3.0
| 48,113
| 0.00158
|
# Authors: Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from __future__ import print_function
import errno
import socket
import getpass
import gssapi
import ldif
import os
import re
import fileinput
import sys
import tempfile
import shutil
import traceback
import textwrap
from contextlib import contextmanager
from dns import resolver, rdatatype
from dns.exception import DNSException
import ldap
import ldapurl
import six
from six.moves.configparser import SafeConfigParser, NoOptionError
import ipaplatform
from ipapython import ipautil, sysrestore, admintool, version
from ipapython.admintool import ScriptError
from ipapython.ipa_log_manager import root_logger
from ipalib.util import validate_hostname
from ipapython import config
from ipalib import api, errors, x509
from ipapython.dn import DN
from ipaserver.install import certs, service, sysupgrade
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
if six.PY3:
unicode = str
# Used to determine install status
IPA_MODULES = [
'httpd', 'kadmin', 'dirsrv', 'pki-tomcatd', 'install', 'krb5kdc', 'ntpd',
'named', 'ipa_memcached']
class BadHostError(Exception):
pass
class HostLookupError(BadHostError):
pass
class HostForwardLookupError(HostLookupError):
pass
class HostReverseLookupError(HostLookupError):
pass
class HostnameLocalhost(HostLookupError):
pass
class UpgradeVersionError(Exception):
pass
class UpgradePlatformError(UpgradeVersionError):
pass
class UpgradeDataOlderVersionError(UpgradeVersionError):
pass
class UpgradeDataNewerVersionError(UpgradeVersionError):
pass
class UpgradeMissingVersionError(UpgradeVersionError):
pass
class ReplicaConfig:
def __init__(self, top_dir=None):
self.realm_name = ""
self.domain_name = ""
self.master_host_name = ""
self.dirman_password = ""
self.host_name = ""
self.dir = ""
self.subject_base = None
self.setup_ca = False
self.version = 0
self.top_dir = top_dir
subject_base = ipautil.dn_attribute_property('_subject_base')
def get_fqdn():
fqdn = ""
try:
fqdn = socket.getfqdn()
except Exception:
try:
fqdn = socket.gethostname()
except Exception:
fqdn = ""
return fqdn
def verify_fqdn(host_name, no_host_dns=False, local_hostname=True):
"""
Run fqdn checks for given host:
- test hostname format
- test that hostname is fully qualified
- test forward and reverse hostname DNS lookup
Raises `BadHostError` or derived Exceptions if there is an error
:param host_name: The host name to verify.
:param no_host_dns: If true, skip DNS resolution tests of the host name.
:param local_hostname: If true, run additional checks for local hostnames
"""
if len(host_name.split(".")) < 2 or host_name == "localhost.localdomain":
raise BadHostError("Invalid hostname '%s', must be fully-qualified." % host_name)
if host_name != host_name.lower():
raise BadHostError("Invalid hostname '%s', must be lower-case." % host_name)
if ipautil.valid_ip(host_name):
raise BadHostError("IP address not allowed as a hostname")
try:
# make sure that the host name meets the requirements in ipalib
validate_hostname(host_name)
except ValueError as e:
raise BadHostError("Invalid hostname '%s', %s" % (host_name, unicode(e)))
if local_hostname:
try:
root_logger.debug('Check if %s is a primary hostname for localhost', host_name)
ex_name = socket.gethostbyaddr(host_name)
root_logger.debug('Primary hostname for localhost: %s', ex_name[0])
if host_name != ex_name[0]:
raise HostLookupError("The host name %s does not match the primary host name %s. "\
"Please check /etc/hosts or DNS name resolution" % (host_name, ex_name[0]))
except socket.gaierror:
pass
except socket.error as e:
root_logger.debug(
'socket.gethostbyaddr() error: %d: %s',
e.errno, e.strerror) # pylint: disable=no-member
if no_host_dns:
print("Warning: skipping DNS resolution of host", host_name)
return
try:
root_logger.debug('Search DNS for %s', host_name)
hostaddr = socket.getaddrinfo(host_name, None)
except Exception as e:
root_logger.debug('Search failed: %s', e)
raise HostForwardLookupError("Unable to resolve host name, check /etc/hosts or DNS name resolution")
if len(hostaddr) == 0:
raise HostForwardLookupError("Unable to resolve host name, check /etc/hosts or DNS name resolution")
# Verify this is NOT a CNAME
try:
root_logger.debug('Check if %s is not a CNAME', host_name)
resolver.query(host_name, rdatatype.CNAME)
raise HostReverseLookupError("The IPA Server Hostname cannot be a CNAME, only A and AAAA names are allowed.")
except DNSException:
pass
# list of verified addresses to prevent multiple searches for the same address
verified = set()
for a in hostaddr:
address = a[4][0]
if address in verified:
continue
if address == '127.0.0.1' or address == '::1':
raise HostForwardLookupError("The IPA Server hostname must not resolve to localhost (%s). A routable IP address must be used. Check /etc/hosts to see if %s is an alias for %s" % (address, host_name, address))
try:
root_logger.debug('Check reverse address of %s', address)
revname = socket.gethostbyaddr(address)[0]
except Exception as e:
root_logger.debug('Check failed: %s', e)
root_logger.error(
"Unable to resolve the IP address %s to a host name, "
"check /etc/hosts and DNS name resolution", address)
else:
root_logger.debug('Found reverse name: %s', revname)
if revname != host_name:
root_logger.error(
"The host name %s does not match the value %s obtained "
"by reverse lookup on IP address %s", host_name, revname,
address)
verified.add(address)
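# A minimal usage sketch (the hostname is hypothetical; assumes the
# definitions above): verify_fqdn() signals problems by raising BadHostError
# or one of its subclasses, so callers typically wrap it in try/except.
def _example_verify_fqdn():
    try:
        verify_fqdn("ipa.example.test", no_host_dns=True)
    except BadHostError as e:
        print("Hostname check failed: %s" % e)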
def record_in_hosts(ip, host_name=None, conf_file=paths.HOSTS):
"""
Search record in /etc/hosts - static table lookup for hostnames
In case of match, returns a tuple of ip address and a list of
hostname aliases
When no record is matched, None is returned
:param ip: IP address
:param host_name: Optional hostname to search
:param conf_file: Optional path to the lookup table
"""
hosts = open(conf_file, 'r').readlines()
for line in hosts:
line = line.rstrip('\n')
fields = line.partition('#')[0].split()
if len(fields) == 0:
continue
try:
hosts_ip = fields[0]
names = fields[1:]
if hosts_ip != ip:
continue
if host_name is not None:
if host_name in names:
return (hosts_ip, names)
else:
return None
return (hosts_ip, names)
except IndexError:
            print("Warning: Erroneous line '%s' in %s" % (line, conf_file))
|
babyliynfg/cross
|
tools/project-creator/Python2.6.6/Lib/test/test_pyclbr.py
|
Python
|
mit
| 7,874
| 0.002159
|
'''
Test cases for pyclbr.py
Nick Mathewson
'''
from test.test_support import run_unittest, import_module
import sys
from types import ClassType, FunctionType, MethodType, BuiltinFunctionType
import pyclbr
from unittest import TestCase
StaticMethodType = type(staticmethod(lambda: None))
ClassMethodType = type(classmethod(lambda c: None))
# Silence Py3k warning
import_module('commands', deprecated=True)
# This next line triggers an error on old versions of pyclbr.
from commands import getstatus
# Here we test the python class browser code.
#
# The main function in this suite, 'testModule', compares the output
# of pyclbr with the introspected members of a module. Because pyclbr
# is imperfect (as designed), testModule is called with a set of
# members to ignore.
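# A minimal sketch of that comparison idea (the helper name is illustrative):
# pyclbr.readmodule_ex() returns a dict mapping top-level names to
# pyclbr.Class/pyclbr.Function descriptors, which can be cross-checked
# against the real module via getattr()/hasattr().
def _sketch_readmodule(modulename='pyclbr'):
    descriptors = pyclbr.readmodule_ex(modulename)
    module = __import__(modulename, globals(), {}, ['<silly>'])
    # names reported by pyclbr but absent from the imported module
    return [name for name in descriptors if not hasattr(module, name)]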
class PyclbrTest(TestCase):
def assertListEq(self, l1, l2, ignore):
''' succeed iff {l1} - {ignore} == {l2} - {ignore} '''
missing = (set(l1) ^ set(l2)) - set(ignore)
if missing:
print >>sys.stderr, "l1=%r\nl2=%r\nignore=%r" % (l1, l2, ignore)
self.fail("%r missing" % missing.pop())
def assertHasattr(self, obj, attr, ignore):
''' succeed iff hasattr(obj,attr) or attr in ignore. '''
if attr in ignore: return
if not hasattr(obj, attr): print "???", attr
self.failUnless(hasattr(obj, attr),
'expected hasattr(%r, %r)' % (obj, attr))
def assertHaskey(self, obj, key, ignore):
''' succeed iff key in obj or key in ignore. '''
if key in ignore: return
if key not in obj:
print >>sys.stderr, "***", key
self.assertTrue(key in obj)
def assertEqualsOrIgnored(self, a, b, ignore):
''' succeed iff a == b or a in ignore or b in ignore '''
if a not in ignore and b not in ignore:
self.assertEqual(a, b)
def checkModule(self, moduleName, module=None, ignore=()):
''' succeed iff pyclbr.readmodule_ex(modulename) corresponds
to the actual module object, module. Any identifiers in
ignore are ignored. If no module is provided, the appropriate
module is loaded with __import__.'''
if module is None:
# Import it.
# ('<silly>' is to work around an API silliness in __import__)
module = __import__(moduleName, globals(), {}, ['<silly>'])
dict = pyclbr.readmodule_ex(moduleName)
def ismethod(oclass, obj, name):
classdict = oclass.__dict__
if isinstance(obj, FunctionType):
if not isinstance(classdict[name], StaticMethodType):
return False
else:
if not isinstance(obj, MethodType):
return False
if obj.im_self is not None:
if (not isinstance(classdict[name], ClassMethodType) or
obj.im_self is not oclass):
return False
else:
if not isinstance(classdict[name], FunctionType):
return False
objname = obj.__name__
if objname.startswith("__") and not objname.endswith("__"):
objname = "_%s%s" % (obj.im_class.__name__, objname)
return objname == name
# Make sure the toplevel functions and classes are the same.
for name, value in dict.items():
if name in ignore:
continue
self.assertHasattr(module, name, ignore)
py_item = getattr(module, name)
if isinstance(value, pyclbr.Function):
self.assert_(isinstance(py_item, (FunctionType, BuiltinFunctionType)))
if py_item.__module__ != moduleName:
continue # skip functions that came from somewhere else
self.assertEquals(py_item.__module__, value.module)
else:
self.failUnless(isinstance(py_item, (ClassType, type)))
if py_item.__module__ != moduleName:
continue # skip classes that came from somewhere else
real_bases = [base.__name__ for base in py_item.__bases__]
pyclbr_bases = [ getattr(base, 'name', base)
for base in value.super ]
try:
self.assertListEq(real_bases, pyclbr_bases, ignore)
except:
print >>sys.stderr, "class=%s" % py_item
raise
actualMethods = []
for m in py_item.__dict__.keys():
if ismethod(py_item, getattr(py_item, m), m):
actualMethods.append(m)
foundMethods = []
for m in value.methods.keys():
if m[:2] == '__' and m[-2:] != '__':
foundMethods.append('_'+name+m)
else:
foundMethods.append(m)
try:
self.assertListEq(foundMethods, actualMethods, ignore)
self.assertEquals(py_item.__module__, value.module)
self.assertEqualsOrIgnored(py_item.__name__, value.name,
ignore)
# can't check file or lineno
except:
print >>sys.stderr, "class=%s" % py_item
raise
# Now check for missing stuff.
def defined_in(item, module):
            if isinstance(item, ClassType):
                return item.__module__ == module.__name__
if isinstance(item, FunctionType):
return item.func_globals is module.__dict__
return False
for name in dir(module):
item = getattr(module, name)
if isinstance(item, (ClassType, FunctionType)):
if defined_in(item, module):
self.assertHaskey(dict, name, ignore)
def test_easy(self):
self.checkModule('pyclbr')
self.checkModule('doctest')
# Silence Py3k warning
rfc822 = import_module('rfc822', deprecated=True)
self.checkModule('rfc822', rfc822)
self.checkModule('difflib')
def test_decorators(self):
# XXX: See comment in pyclbr_input.py for a test that would fail
# if it were not commented out.
#
self.checkModule('test.pyclbr_input')
def test_others(self):
cm = self.checkModule
# These were once about the 10 longest modules
cm('random', ignore=('Random',)) # from _random import Random as CoreGenerator
cm('cgi', ignore=('log',)) # set with = in module
cm('urllib', ignore=('_CFNumberToInt32',
'_CStringFromCFString',
'_CFSetup',
'getproxies_registry',
'proxy_bypass_registry',
'proxy_bypass_macosx_sysconf',
'open_https',
'getproxies_macosx_sysconf',
'getproxies_internetconfig',)) # not on all platforms
cm('pickle')
cm('aifc', ignore=('openfp',)) # set with = in module
cm('Cookie')
cm('sre_parse', ignore=('dump',)) # from sre_constants import *
cm('pdb')
cm('pydoc')
# Tests for modules inside packages
cm('email.parser')
cm('test.test_pyclbr')
def test_main():
run_unittest(PyclbrTest)
if __name__ == "__main__":
test_main()
|
illicitonion/givabit
|
src/givabit/backend/charity_repository_test.py
|
Python
|
apache-2.0
| 1,597
| 0.004383
|
from givabit.backend.charity import Charity
from givabit.backend.errors import MissingValueException, MultipleValueException
from givabit.test_common import test_data
from givabit.test_common import test_utils
class CharityRepositoryTest(test_utils.TestCase):
def setUp(self):
super(CharityRepositoryTest, self).setUp()
self.all_charities = [test_data.c1, test_data.c2, test_data.c3, test_data.c4]
for charity in self.all_charities:
self.charity_repo.add_or_update_charity(charity)
def test_lists_charities(self):
self.assertSequenceEqual(self.charity_repo.list_charities(), self.all_charities)
def test_gets_single_charity(self):
        self.assertEqual(self.charity_repo.get_charity('Shelter'), test_data.c1)
self.assertEqual(self.charity_repo.get_charity('Oxfam'), test_data.c2)
with self.assertRaises(MissingValueException):
self.charity_repo.get_charity('Does not exist')
try:
self.charity_repo.get_charity('BHF')
except MultipleValueException, e:
            self.assertSequenceEqual(e.values, [test_data.c3, test_data.c4])
def test_gets_charity_by_id(self):
self.assertEquals(self.charity_repo.get_charity(id=test_data.c1.key().id()), test_data.c1)
def test_getting_missing_charity_by_id_throws(self):
missing_id = 0
while missing_id in map(lambda charity: charity.key().id(), self.all_charities):
missing_id += 1
with self.assertRaises(MissingValueException):
self.charity_repo.get_charity(id=missing_id)
|
VeNoMouS/Sick-Beard
|
lib/cherrypy/lib/cpstats.py
|
Python
|
gpl-3.0
| 22,932
| 0.000218
|
"""CPStats, a package for collecting and reporting on program statistics.
Overview
========
Statistics about program operation are an invaluable monitoring and debugging
tool. Unfortunately, the gathering and reporting of these critical values is
usually ad-hoc. This package aims to add a centralized place for gathering
statistical performance data, a structure for recording that data which
provides for extrapolation of that data into more useful information,
and a method of serving that data to both human investigators and
monitoring software. Let's examine each of those in more detail.
Data Gathering
--------------
Just as Python's `logging` module provides a common importable for gathering
and sending messages, performance statistics would benefit from a similar
common mechanism, and one that does *not* require each package which wishes
to collect stats to import a third-party module. Therefore, we choose to
re-use the `logging` module by adding a `statistics` object to it.
That `logging.statistics` object is a nested dict. It is not a custom class,
because that would:
1. require libraries and applications to import a third-party module in
order to participate
2. inhibit innovation in extrapolation approaches and in reporting tools, and
3. be slow.
There are, however, some specifications regarding the structure of the dict.::
{
+----"SQLAlchemy": {
| "Inserts": 4389745,
| "Inserts per Second":
| lambda s: s["Inserts"] / (time() - s["Start"]),
| C +---"Table Statistics": {
| o | "widgets": {-----------+
N | l | "Rows": 1.3M, | Record
a | l | "Inserts": 400, |
m | e | },---------------------+
e | c | "froobles": {
s | t | "Rows": 7845,
p | i | "Inserts": 0,
a | o | },
c | n +---},
e | "Slow Queries":
| [{"Query": "SELECT * FROM widgets;",
| "Processing Time": 47.840923343,
| },
| ],
+----},
}
The `logging.statistics` dict has four levels. The topmost level is nothing
more than a set of names to introduce modularity, usually along the lines of
package names. If the SQLAlchemy project wanted to participate, for example,
it might populate the item `logging.statistics['SQLAlchemy']`, whose value
would be a second-layer dict we call a "namespace". Namespaces help multiple
packages to avoid collisions over key names, and make reports easier to read,
to boot. The maintainers of SQLAlchemy should feel free to use more than one
namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case
or other syntax constraints on the namespace names; they should be chosen
to be maximally readable by humans (neither too short nor too long).
Each namespace, then, is a dict of named statistical values, such as
'Requests/sec' or 'Uptime'. You should choose names which will look
good on a report: spaces and capitalization are just fine.
In addition to scalars, values in a namespace MAY be a (third-layer)
dict, or a list, called a "collection". For example, the CherryPy
:class:`StatsTool` keeps track of what each request is doing (or has most
recently done) in a 'Requests' collection, where each key is a thread ID; each
value in the subdict MUST be a fourth dict (whew!) of statistical data about
each thread. We call each subdict in the collection a "record". Similarly,
the :class:`StatsTool` also keeps a list of slow queries, where each record
contains data about each slow query, in order.
Values in a namespace or record may also be functions, which brings us to:
Extrapolation
-------------
The collection of statistical data needs to be fast, as close to unnoticeable
as possible to the host program. That requires us to minimize I/O, for example,
but in Python it also means we need to minimize function calls. So when you
are designing your namespace and record values, try to insert the most basic
scalar values you already have on hand.
When it comes time to report on the gathered data, however, we usually have
much more freedom in what we can calculate. Therefore, whenever reporting
tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents
of `logging.statistics` for reporting, they first call
`extrapolate_statistics` (passing the whole `statistics` dict as the only
argument). This makes a deep copy of the statistics dict so that the
reporting tool can both iterate over it and even change it without harming
the original. But it also expands any functions in the dict by calling them.
For example, you might have a 'Current Time' entry in the namespace with the
value "lambda scope: time.time()". The "scope" parameter is the current
namespace dict (or record, if we're currently expanding one of those
instead), allowing you access to existing static entries. If you're truly
evil, you can even modify more than one entry at a time.
However, don't try to calculate an entry and then use its value in further
extrapolations; the order in which the functions are called is not guaranteed.
This can lead to a certain amount of duplicated work (or a redesign of your
schema), but that's better than complicating the spec.
After the whole thing has been extrapolated, it's time for:
Reporting
---------
The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates
it all, and then transforms it to HTML for easy viewing. Each namespace gets
its own header and attribute table, plus an extra table for each collection.
This is NOT part of the statistics specification; other tools can format how
they like.
You can control which columns are output and how they are formatted by updating
StatsPage.formatting, which is a dict that mirrors the keys and nesting of
`logging.statistics`. The difference is that, instead of data values, it has
formatting values. Use None for a given key to indicate to the StatsPage that a
given column should not be output. Use a string with formatting
(such as '%.3f') to interpolate the value(s), or use a callable (such as
lambda v: v.isoformat()) for more advanced formatting. Any entry which is not
mentioned in the formatting dict is output unchanged.
Monitoring
----------
Although the HTML output takes pains to assign unique id's to each <td> with
statistical data, you're probably better off fetching /cpstats/data, which
outputs the whole (extrapolated) `logging.statistics` dict in JSON format.
That is probably easier to parse, and doesn't have any formatting controls,
so you get the "original" data in a consistently-serialized format.
Note: there's no treatment yet for datetime objects. Try time.time() instead
for now if you can. Nagios will probably thank you.
Turning Collection Off
----------------------
It is recommended each namespace have an "Enabled" item which, if False,
stops collection (but not reporting) of statistical data. Applications
SHOULD provide controls to pause and resume collection by setting these
entries to False or True, if present.
Usage
=====
To collect statistics on CherryPy applications::
from cherrypy.lib import cpstats
appconfig['/']['tools.cpstats.on'] = True
To collect statistics on your own code::
import logging
# Initialize the repository
if not hasattr(logging, 'statistics'): logging.statistics = {}
# Initialize my namespace
mystats = logging.statistics.setdefault('My Stuff', {})
# Initialize my namespace's scalars and collections
mystats.update({
'Enabled': True,
'Start Time': time.time(),
'Important Events': 0,
'Events/Second': lambda s: (
(s['Important Events'] / (time.time() - s['Start Time']))),
})
...
for event in events:
...
# Collect stats
if mystats.get('Enabled', False):
mystats['Important Events'] += 1
To report statistics::
root.cpstats = cpstats.StatsPage()
To format statistics reports::
See 'Reporting', above.
"""
import logging
import os
import sys
import threading
import time
import cherrypy
from cherrypy._cpcompat
|
lama7/blogtool
|
blogtool/xmlproxy/wp_proxy.py
|
Python
|
mit
| 17,754
| 0.005633
|
import proxybase
import xmlrpclib
import mimetypes
import os
import data
################################################################################
""" getInst
returns an instance of a wpproxy object
"""
def getInst(url, user, password):
wp = WordpressProxy(url, user, password)
return wp
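# A minimal usage sketch (the URL and credentials are placeholders; assumes a
# reachable WordPress XML-RPC endpoint):
def _example_list_categories():
    wp = getInst('https://blog.example.test/xmlrpc.php', 'user', 'secret')
    return [cat['categoryName'] for cat in wp.getCategories()]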
################################################################################
"""WordpressProxy
The following defines a blogproxy class that inherits methods from the
xmlrpclib. To make this work, the __init__ method of the ancestor
class(xmlrpclib.ServerProxy in this case) must be called explicitly as
part of the initialization. From that point, the various server methods
are "directly" accessible through my blogproxy class
"""
class WordpressProxy(proxybase.BlogProxy):
############################################################################
"""getCategories
"""
def getCategories(self):
def _tryMethods(blogid):
try:
response = self.wp.getTerms(blogid,
self._username,
self._password,
'category',
{})
except xmlrpclib.Fault:
pass
except xmlrpclib.ProtocolError, error:
raise proxybase.ProxyError("wp.getCategories", error)
else:
return [ { 'categoryName' : cat['name'],
'parentId' : cat['parent'],
'categoryId' : cat['term_id'],
'categoryDescription' : cat['description'],} for cat in response ]
# fallback to old method
try:
return self.metaWeblog.getCategories(blogid,
self._username,
self._password)
except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
raise proxybase.ProxyError("wp.getCategories", error)
########################################################################
# getCategories starts here...
if self._categories == None:
self._categories = _tryMethods(self._getBlogID)
return self._categories
############################################################################
"""newCategory
"""
def newCategory(self, newcat, parent, slug='', desc=''):
blogid = self._getBlogID()
# start by trying newer Wordpress API call
term = { 'name' : newcat,
'taxonomy' : 'category',
'slug' : slug,
'description' : desc}
# it appears that if parent is 0, the call won't work to add the
# category, but will work if parent is not present.
if int(parent) != 0:
term['parent'] = int(parent)
try:
return self.wp.newTerm(blogid,
self._username,
self._password,
term)
except xmlrpclib.Fault:
pass
except xmlrpclib.ProtocolError, error:
raise proxybase.ProxyError("wp.newCategory", error)
# fallback to old call
try:
return self.wp.newCategory(blogid,
self._username,
self._password,
{ 'name' : newcat,
'slug' : slug,
'description' : desc,
'parent_id' : parent})
except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
raise proxybase.ProxyError("wp.newCategory", error)
############################################################################
"""getRecentTitles
"""
def getRecentTitles(self, number):
blogid = self._getBlogID()
# First, try the Wordpress XMLRPC API calls
try:
response = self.wp.getPosts(blogid,
self._username,
self._password,
{ # filter parameter
'post_type' : 'post', # or 'page', 'attachment'
'post_status' : 'publish', # or 'draft', 'private, 'pending'
'number' : number,
'offset' : 0, # offset by # posts
'orderby' : '', # appears to have no effect
'order' : '', # appears to have no effect
},
['post_id', 'post_title', 'post_date'])
except xmlrpclib.Fault:
pass
except xmlrpclib.ProtocolError, error:
raise proxybase.ProxyError("wp.getRecentTitles", error)
else:
return [{'postid' : postmeta['post_id'],
'title' : postmeta['post_title'],
'dateCreated' : postmeta['post_date']} for postmeta in response ]
# The Wordpress XMLRPC API is not available, try the old MT API
try:
return self.mt.getRecentPostTitles(blogid,
self._username,
self._password,
number)
except (xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
raise proxybase.ProxyError("wp.getRecentTitles", error)
############################################################################
"""publishPost
"""
def publishPost(self, post):
blogid = self._getBlogID()
try:
return self.wp.newPost(blogid,
self._username,
self._password,
post.wpStruct)
except xmlrpclib.Fault:
pass
except xmlrpclib.ProtocolError, error:
raise proxybase.ProxyError("wp.publishPost", error)
try:
return self.metaWeblog.newPost(blogid,
self._username,
self._password,
post.metaweblogStruct,
post.publish)
except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
raise proxybase.ProxyError("wp.publishPost", error)
############################################################################
"""editPost
"""
def editPost(self, postid, post):
try:
if self.wp.editPost(self._getBlogID(),
self._username,
self._password,
postid,
post.wpStruct):
return postid
# error updating post
raise proxybase.ProxyError("wp.editPost", "post not updated")
except xmlrpclib.Fault as err:
pass
except xmlrpclib.ProtocolError, error:
raise proxybase.ProxyError("wp.editPost", error)
try:
self.metaWeblog.editPost(postid,
self._username,
self._password,
post.metaweblogStruct,
post.publish)
except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
raise proxybase.ProxyError("wp.editPost", error)
return postid
############################################################################
"""getPo
|
mjirik/lisa
|
tests/texture_features_experiments_test.py
|
Python
|
bsd-3-clause
| 3,114
| 0.003215
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# import funkcí z jiného adresáře
import sys
import os.path
from loguru import logger
path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../experiments/"))
# sys.path.append(os.path.join(path_to_script, "../extern/sed3/"))
# sys.path.append(os.path.join(path_to_script, "../src/"))
import unittest
import pytest
import experiments.tiled_liver_statistics as tls
class TextureFeaturesExperimentTest(unittest.TestCase):
# @unittest.skip("comment after implementation")
@pytest.mark.slow
def test_run_experiments(self):
"""
"""
import lisa.texture_features as tfeat
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
import classification
self.dcmdir = os.path.join(
path_to_script, '../sample_data/jatra_06mm_jenjatraplus/')
yaml_file = os.path.join(
            path_to_script, '../experiments/20130919_liver_statistics.yaml')
# write_csv(fvall)
gf = tfeat.GaborFeatures()
glcmf = tfeat.GlcmFeatures()
haralick = tfeat.HaralickFeatures()
list_of_feature_fcn = [
[tls.feat_hist, []],
# [gf.feats_gabor, []],
# [glcmf.feats_glcm, []],
# [haralick.feats_haralick, [True]]
]
        list_of_classifiers = [
# [GaussianNB, []],
# [svm.SVC, []],
[classification.GMMClassifier,
{'n_components': 2, 'covariance_type': 'full'}],
]
featrs_plus_classifs = tls.make_product_list(list_of_feature_fcn,
list_of_classifiers)
tile_shape = [50, 50, 50]
tls.experiment(yaml_file, yaml_file,
featrs_plus_classifs, tile_shape=tile_shape,
use_voxelsize_norm=False,
working_voxelsize_mm=[1, 1, 1],
visualization=False)
# slab = {'none':0, 'bone':8,'lungs':9,'heart':10}
# import pdb; pdb.set_trace()
# SupportStructureSegmentation
# sss = support_structure_segmentation.SupportStructureSegmentation(
# data3d = self.data3d,
# voxelsize_mm = self.metadata['voxelsize_mm'],
# modality = 'CT',
# slab = slab
#)
# sss.lungs_segmentation()
# sss.segmentation[260:270,160:170,1:10] = 2
# sss.visualization()
# total number of voxels segmented as bones in spine
# probebox1 = sss.segmentation [260:270,160:170,1:10]== slab['lungs']
# self.assertGreater(np.sum(probebox1),20)
# total number of voexel segmented as none in upper left corner
# probebox1 = sss.segmentation[10:20,10:20,5:15] == slab['none']
# self.assertGreater(np.sum(probebox1),900)
# import pdb; pdb.set_trace()
if __name__ == "__main__":
    import logging
    logging.basicConfig(stream=sys.stderr)
    logging.getLogger().setLevel(logging.DEBUG)
    unittest.main()
|
Seldaiendil/meyeOS
|
devtools/qooxdoo-1.5-sdk/tool/pylib/ecmascript/frontend/Scanner.py
|
Python
|
agpl-3.0
| 8,958
| 0.008038
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Thomas Herchenroeder (thron7)
#
################################################################################
##
# The main purpose of this module is to provide a low-level JS scanner,
# materialized in the Scanner class. It only recognizes primitive lexemes, like
# numbers, operators, and symbol names, but nothing that requires context
# awareness like strings or comments.
##
import sys, os, re, types
from collections import deque
##
# IterObject -- abstract base class for iterators, making them resettable and
# providing an immediate .next() method
#
class IterObject(object):
def __init__(self, inData):
self.inData = inData
self.resetIter()
def resetIter(self):
self._iter = self.__iter__()
self.next = self._iter.next
def __iter__(self):
raise RuntimeError("You have to overload the __iter__ method!")
##
# Scanner -- low-level scanner that reads text from a stream and returns simple tokens as tuples
#
# Usage:
# f=open('file.js')
# fs= f.read()
# x=Scanner(text)
# a=[y for y in Scanner(text)]
class Scanner(IterObject):
def __init__(self, stream):
super(Scanner, self).__init__(stream)
self.next_start = 0
patt = re.compile(ur'''
(?P<float>
\d*\.\d+(?:[eE][+-]?\d+)? # float, dotted
|\d+[eE][+-]?\d+ # undotted, with 'e'
)
|(?P<hexnum> 0x[0-9A-Fa-f]+) # hex number
|(?P<number> \d+) # number TODO: there is no such thing in JS!
|(?P<ident> [$\w]+) # identifier, name
|(?P<nl> # unicode line separators
\x0D\x0A
#|\x20\x28 # strange: this is ' (' !?
#|\x20\x29 # strange: this is ' )' !?
|\x0A
|\x0D
)
|(?P<white> (?:(?:\s|\ufeff)(?<!\n))+) # white ( + BOM - \n)
|(?P<mulop> # multi-char operators
<<=? # <<, <<=
|>= # >=
|<= # <=
|===? # ==, ===
|!==? # !=, !==
|[-+*/%|^&]= # -=, +=, *=, /=, %=, |=, ^=, &=
|>>>?=? # >>, >>>, >>=, >>>=
|&& # &&
|[|^]\| # ||, ^|
|\+\+ # ++
|-- # --
|:: # ::
|\.\. # ..
|// # // (end-of-line comment)
|/\* # /* (start multi-line comment)
|\*/ # */ (end multi-line comment)
)
|(?P<op> \W) # what remains (operators)
''', re.VERBOSE|re.DOTALL|re.MULTILINE|re.UNICODE) # re.LOCALE?!
# individual regex to search fast-forward to potential string ends (both comments and quoted)
stringEnd = {}
stringEnd['\n'] = re.compile('(?P<commI>.*(?=\n|$))', re.UNICODE)
stringEnd[r'\*/'] = re.compile(r'(?P<commM>.*?\*/)', re.DOTALL|re.MULTILINE|re.UNICODE)
stringEnd['"'] = re.compile(r'(?P<dquote>.*?")', re.UNICODE)
stringEnd["'"] = re.compile(r"(?P<squote>.*?')", re.UNICODE)
# yields :
# ( <group_name> , <scan_string> , <start_pos> , <scan_length> )
def __iter__1(self):
miter = self.patt.finditer(self.inData)
for mo in miter:
mo_lastgroup = mo.lastgroup
mstart = mo.start()
mend = mo.end()
if mstart != self.next_start: # assure compactness of scan
raise AssertionError, "There's a scan gap before: %s (at pos %d)" % (mo.group(), self.next_start)
self.next_start = mend # match range is [mo.start(), mo.end()[
yield (mo_lastgroup, mo.group(mo_lastgroup), mstart, mend - mstart)
def __iter__(self):
delimiter = None
inData = self.inData
lenData = len(inData)
cursor = 0
while cursor < lenData:
if delimiter:
mo = self.stringEnd[delimiter].search(inData, pos=cursor)
else:
mo = self.patt.match(inData, pos=cursor)
if mo:
mo_lastgroup = mo.lastgroup
mstart = mo.start()
mend = mo.end()
cursor = mend # when using the 'pos' parameter, mo.start/end refer to the *entire* underlying string
delimiter = (yield (mo_lastgroup, mo.group(mo_lastgroup), mstart, mend))
else:
raise SyntaxError("Unable to tokenize text starting with: \"%s\"" % inData[cursor:cursor+200])
##
# Token -- wraps a low-level scanner tuple into a simple object
class Token(object):
__slots__ = 'name', 'value', 'spos', 'len'
def __init__(self, ttup):
(
self.name, # type
self.value,
self.spos, # character position within stream
self.len, # length of value
) = ttup
def __str__(self):
return "(%s, %r, %d, %d)" % (self.name, self.value, self.spos, self.len)
##
# LQueue -- enhanced queue that allows push-back from one ("Left") side
#
# I'm using this class as a wrapper around (token) iterators, so I can not
# only get the next item from the iterator, but also push it back again.
# This allows peek-ahead processing of tokens, and methods can push tokens
# back into the stream if they find they don't want to use them.
# The implementation is based on a collections.deque double ended queue that
# uses one end (the "right" one) to fill from the iterator, and the other
# (the "left") end as the producer end for .next() iteration and the push-
# back method. Here are the schematics:
#
# -------------------------
# to consumer <--- LQueue <--- from source iterator
# (.next()) -------------------------
#
# from consumer--->
# (.putBack())
#
# The StopIteration exception is propagated (i.e.: uncaught) from the ori-
# ginal iterator. The interesting end of the deque is the left, hence the
# name "LQueue".
class LQueue(object):
def __init__(self, iterator):
self.iterator = iterator
self.queue = deque(())
def next(self, arg=None):
if len(self.queue) == 0:
self.queue.append(self.iterator.send(arg))
return self.queue.popleft()
##
# peek n tokens ahead
def peek(self, n=1):
toks = []
cnt = 0
# get the desired token
while cnt < n:
try:
t = self.next()
except StopIteration:
break
toks.append(t)
cnt += 1
# put all retrieved tokens back
for t in toks[::-1]:
self.putBack(t)
return toks
def putBack(self, item):
self.queue.appendleft(item)
def __iter__(self):
while True:
if len(self.queue) == 0:
self.queue.append(self.iterator.next()) # let self.iterator's StopIteration propagate
yield self.queue.popleft()
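# A minimal usage sketch of the peek/push-back behaviour (the generator here
# is illustrative; LQueue.next() expects a generator because it calls send()):
def _example_lqueue():
    def source():
        for ch in "abc":
            yield ch
    q = LQueue(source())
    first = q.next()   # 'a'
    q.putBack(first)   # push it back for re-reading
    return q.peek(2)   # ['a', 'b'], without consuming them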
# - Helpers -------------------------------------------------------------------
##
# is_last_escaped -- check whether the last char in a string is escaped, i.e. preceded
# by an odd number of consecutive escape chars ("\")
def is_last_escaped(s):
i = len(s) - 2 # start from but-last char
c = 0
while i>=0: # indexing backwards
if s[i] == "\\":
|
mobolic/facebook-sdk
|
examples/get_posts.py
|
Python
|
apache-2.0
| 1,391
| 0
|
"""
A simple example script to get all posts on a user's timeline.
Originally created by Mitchell Stewart.
<https://gist.github.com/mylsb/10294040>
"""
import facebook
import requests
def some_action(post):
"""Here you might want to do something with each post. E.g. grab the
post's message (post['message']) or the post's picture (post['picture']).
In this implementation we just print the post's created time.
"""
print(post["created_time"])
# You'll need an access token here to do anything. You can get a temporary one
# here: https://developers.facebook.com/tools/explorer/
access_token = ""
# Look at Bill Gates's profile for this example by using his Facebook id.
user = "BillGates"
graph = facebook.GraphAPI(access_token)
profile = graph.get_object(user)
posts = graph.get_connections(profile["id"], "posts")
# Wrap this block in a while loop so we can keep paginating requests until
# finished.
while True:
try:
# Perform some action on each post in the collection we receive from
# Facebook.
        [some_action(post=post) for post in posts["data"]]
# Attempt to make a request to the next page of data, if it exists.
posts = requests.get(posts["paging"]["next"]).json()
except KeyError:
# When there are no more pages (['paging']['next']), break from the
# loop and end the script.
break
|
fbradyirl/home-assistant
|
tests/components/input_boolean/__init__.py
|
Python
|
apache-2.0
| 45
| 0
|
"""Te
|
sts for the input_bo
|
olean component."""
|
aperigault/ansible
|
lib/ansible/module_utils/postgres.py
|
Python
|
gpl-3.0
| 7,961
| 0.004271
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
# Most of this was originally added by other creators in the postgresql_user module.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
psycopg2 = None # This line needs for unit tests
try:
import psycopg2
HAS_PSYCOPG2 = True
except ImportError:
HAS_PSYCOPG2 = False
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
from distutils.version import LooseVersion
def postgres_common_argument_spec():
"""
Return a dictionary with connection options.
The options are commonly used by most of PostgreSQL modules.
"""
return dict(
login_user=dict(default='postgres'),
login_password=dict(default='', no_log=True),
login_host=dict(default=''),
login_unix_socket=dict(default=''),
port=dict(type='int', default=5432, aliases=['login_port']),
ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
ca_cert=dict(aliases=['ssl_rootcert']),
)
def ensure_required_libs(module):
"""Check required libraries."""
if not HAS_PSYCOPG2:
module.fail_json(msg=missing_required_lib('psycopg2'))
if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
"""Connect to a PostgreSQL database.
Return psycopg2 connection object.
Args:
module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
conn_params (dict) -- dictionary with connection parameters
Kwargs:
autocommit (bool) -- commit automatically (default False)
fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
"""
ensure_required_libs(module)
db_connection = None
try:
db_connection = psycopg2.connect(**conn_params)
if autocommit:
if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
# Switch role, if specified:
if module.params.get('session_role'):
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
try:
cursor.execute('SET ROLE %s' % module.params['session_role'])
except Exception as e:
module.fail_json(msg="Could not switch role: %s" % to_native(e))
finally:
cursor.close()
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least '
'version 8.4 to support sslrootcert')
if fail_on_conn:
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
else:
module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
db_connection = None
except Exception as e:
if fail_on_conn:
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
else:
module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
db_connection = None
return db_connection
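# A minimal usage sketch (illustrative only): `module` is expected to be an
# AnsibleModule built with postgres_common_argument_spec(), and the connection
# parameters come from get_conn_params() defined below.
def _example_connect(module):
    conn_params = get_conn_params(module, module.params)
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    return db_connection.cursor()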
def exec_sql(obj, query, ddl=False, add_to_executed=True):
    """Execute SQL.
    Auxiliary function for PostgreSQL user classes.
Returns a query result if possible or True/False if ddl=True arg was passed.
It necessary for statements that don't return any result (like DDL queries).
Arguments:
obj (obj) -- must be an object of a user class.
The object must have module (AnsibleModule class object) and
cursor (psycopg cursor object) attributes
query (str) -- SQL query to execute
ddl (bool) -- must return True or False instead of rows (typical for DDL queries)
(default False)
add_to_executed (bool) -- append the query to obj.executed_queries attribute
"""
try:
obj.cursor.execute(query)
if add_to_executed:
obj.executed_queries.append(query)
if not ddl:
res = obj.cursor.fetchall()
return res
return True
except Exception as e:
obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
return False
def get_conn_params(module, params_dict, warn_db_default=True):
"""Get connection parameters from the passed dictionary.
Return a dictionary with parameters to connect to PostgreSQL server.
Args:
module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
params_dict (dict) -- dictionary with variables
Kwargs:
warn_db_default (bool) -- warn that the default DB is used (default True)
"""
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the return dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"ssl_mode": "sslmode",
"ca_cert": "sslrootcert"
}
# Might be different in the modules:
if params_dict.get('db'):
params_map['db'] = 'database'
elif params_dict.get('database'):
params_map['database'] = 'database'
elif params_dict.get('login_db'):
params_map['login_db'] = 'database'
else:
if warn_db_default:
module.warn('Database name has not been passed, '
'used default database to connect to.')
kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
if k in params_map and v != '' and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
if is_localhost and params_dict["login_unix_socket"] != "":
kw["host"] = params_dict["login_unix_socket"]
return kw
|
karrtikr/ete
|
ete3/clustering/stats.py
|
Python
|
gpl-3.0
| 159,124
| 0.014555
|
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
# Copyright (c) 1999-2007 Gary Strangman; All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
stats.py module
(Requires pstat.py module.)
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: Dec 18, 2007 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implementated the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
                      describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
from __future__ import absolute_import
from __future__ import print_function
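# A minimal sketch of the Dispatch idea described in the docstring above (not
# the actual implementation used further down): pick the list ('l') or array
# ('a') variant of a function based on the argument type.
def _sketch_dispatch(list_fn, array_fn):
    def dispatched(x, *args, **kwargs):
        if isinstance(x, (list, tuple)):
            return list_fn(x, *args, **kwargs)
        return array_fn(x, *args, **kwargs)
    return dispatched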
## CHANGE LOG:
## ===========
## 07-11.26 ... conversion for numpy started
## 07-05-16 ... added Lin's Concordance Correlation Coefficient (alincc) and acov
## 05-08-21 ... added "Dice's coefficient"
## 04-10-26 ... added ap2t(), an ugly fcn for converting p-vals to T-vals
## 04-04-03 ... added amasslinregress() function to do regression on N-D arrays
## 03-01-03 ... CHANGED VERSION TO 0.6
## fixed atsem() to properly handle limits=None case
## improved histogram and median functions (estbinwidth) and
## fixed atvar() function (wrong answers for neg numbers?!?)
## 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
## 02-05-10 ... fixed lchisqpro
|
ecolitan/fatics
|
venv/lib/python2.7/site-packages/netaddr/ip/__init__.py
|
Python
|
agpl-3.0
| 66,411
| 0.001837
|
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2012, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""Routines for IPv4 and IPv6 addresses, subnets and ranges."""
import sys as _sys
import re as _re
from netaddr.core import AddrFormatError, AddrConversionError, num_bits, \
DictDotLookup, NOHOST, N, INET_PTON, P, ZEROFILL, Z
from netaddr.strategy import ipv4 as _ipv4, ipv6 as _ipv6
from netaddr.compat import _sys_maxint, _iter_range, _is_str, _int_type, \
_str_type
#-----------------------------------------------------------------------------
# Pre-compiled regexen used by cidr_merge() function.
RE_CIDR_ADJACENT = _re.compile(r'^([01]+)0 \1[1]$')
RE_CIDR_WITHIN = _re.compile(r'^([01]+) \1[10]+$')
RE_VALID_CIDR_BITS = _re.compile('^[01]+$')
#-----------------------------------------------------------------------------
class BaseIP(object):
"""
An abstract base class for common operations shared between various IP
related subclasses.
"""
__slots__ = ('_value', '_module')
def __init__(self):
"""Constructor."""
self._value = None
self._module = None
def _set_value(self, value):
if not isinstance(value, _int_type):
raise TypeError('int argument expected, not %s' % type(value))
if not 0 <= value <= self._module.max_int:
raise AddrFormatError('value out of bounds for an %s address!' \
% self._module.family_name)
self._value = value
value = property(lambda self: self._value, _set_value,
doc='a positive integer representing the value of IP address/subnet.')
def key(self):
"""
:return: a key tuple that uniquely identifies this IP address.
"""
return NotImplemented
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPAddress`
correctly.
"""
return NotImplemented
def __hash__(self):
"""
:return: A hash value uniquely indentifying this IP object.
"""
return hash(self.key())
def __eq__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
equivalent to ``other``, ``False`` otherwise.
"""
try:
return self.key() == other.key()
except (AttributeError, TypeError):
return NotImplemented
def __ne__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
not equivalent to ``other``, ``False`` otherwise.
"""
try:
return self.key() != other.key()
except (AttributeError, TypeError):
return NotImplemented
def __lt__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
less than ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() < other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __le__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
less than or equal to ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() <= other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __gt__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
greater than ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() > other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __ge__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
greater than or equal to ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() >= other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def is_unicast(self):
""":return: ``True`` if this IP is unicast, ``False`` otherwise"""
return not self.is_multicast()
def is_multicast(self):
""":return: ``True`` if this IP is multicast, ``False`` otherwise"""
if self._module == _ipv4:
return self in IPV4_MULTICAST
elif self._module == _ipv6:
return self in IPV6_MULTICAST
def is_loopback(self):
"""
        :return: ``True`` if this IP is a loopback address (not for network
transmission), ``False`` otherwise.
References: RFC 3330 and 4291.
"""
if self.version == 4:
return self in IPV4_LOOPBACK
elif self.version == 6:
return self == IPV6_LOOPBACK
def is_private(self):
"""
        :return: ``True`` if this IP is for internal/private use only
            (i.e. non-public), ``False`` otherwise. Reference: RFCs 1918,
            3330, 4193, 3879 and 2365.
"""
if self.version == 4:
for cidr in IPV4_PRIVATE:
if self in cidr:
return True
elif self.version == 6:
for cidr in IPV6_PRIVATE:
if self in cidr:
return True
if self.is_link_local():
return True
return False
def is_link_local(self):
"""
        :return: ``True`` if this IP is a link-local address, ``False`` otherwise.
Reference: RFCs 3927 and 4291.
"""
if self.version == 4:
return self in IPV4_LINK_LOCAL
elif self.version == 6:
return self in IPV6_LINK_LOCAL
def is_reserved(self):
"""
        :return: ``True`` if this IP is in an IANA reserved range, ``False``
otherwise. Reference: RFCs 3330 and 3171.
"""
if self.version == 4:
for cidr in IPV4_RESERVED:
if self in cidr:
return True
elif self.version == 6:
for cidr in IPV6_RESERVED:
if self in cidr:
return True
return False
def is_ipv4_mapped(self):
"""
        :return: ``True`` if this IP is an IPv4-mapped IPv6 address, ``False``
otherwise.
"""
return self.version == 6 and (self._value >> 32) == 0xffff
def is_ipv4_compat(self):
"""
        :return: ``True`` if this IP is an IPv4-compatible IPv6 address, ``False``
otherwise.
"""
return self.version == 6 and (self._value >> 32) == 0
@property
def info(self):
"""
A record dict containing IANA registration details for this IP address
if available, None otherwise.
"""
# Lazy loading of IANA data structures.
from netaddr.ip.iana import query
return DictDotLookup(query(self))
@property
def version(self):
"""the IP protocol version represented by this IP object."""
return self._module.version
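# Illustrative sketch (added comment, not part of the original module): the rich
# comparisons above delegate to key()/sort_key(), so concrete subclasses such as
# IPAddress sort and compare naturally.  The expected values below are
# assumptions about the public netaddr API, shown doctest-style for clarity only.
#
#     >>> sorted([IPAddress('10.0.0.2'), IPAddress('10.0.0.1')])[0]
#     IPAddress('10.0.0.1')
#     >>> IPAddress('10.0.0.1').is_private(), IPAddress('8.8.8.8').is_private()
#     (True, False)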
#-----------------------------------------------------------------------------
class IPAddress(BaseIP):
"""
An individual IPv4 or IPv6 address without a net mask or subnet prefix.
To support these and other network based operations, see `IPNetwork`.
"""
__slots__ = ()
def __init__(self, addr, version=None, flags=0):
"""
Constructor.
:param addr: an IPv4 or IPv6 address which may be represented in an
accepted string format, as an unsigned integer or as an
|
pinax/django-waitinglist
|
waitinglist/stats.py
|
Python
|
mit
| 842
| 0.002375
|
import datetime
from django.conf import settings
from django.utils import timezone
from account.models import SignupCode
from waitinglist.models import WaitingListEntry
from django.contrib.auth import get_user_model
User = get_user_model()  # resolve the configured user model so User.objects works below
def stats():
waiting_list = WaitingListEntry.objects
return {
"waiting_list_entries": waiting_list.count(),
"waitinglist_added_last_seven_days":
            waiting_list.filter(created__gt=timezone.now() - datetime.timedelta(days=7)).count(),
"waitinglist_added_last_thirty_days":
waiting_list.filter(created__gt=timezone.now() - datetime.timedelta(days=30)).count(),
"waiting_list_entries_to_invi
|
te":
waiting_list.exclude(email__in=SignupCode.objects.values("email"))
.exclude(email__in=User.objects.values("email")).count()
}
|
brianjimenez/lightdock
|
bin/post/lgd_filter_membrane.py
|
Python
|
gpl-3.0
| 5,399
| 0.00463
|
#!/usr/bin/env python
"""Filter LightDock final swarm results depending on the compatibility with the membrane"""
from __future__ import print_function
import sys
import os
import argparse
import shutil
import re
from prody.measure.contacts import Contacts
from prody import parsePDB, confProDy
from lightdock.util.logger import LoggingManager
from lightdock.util.analysis import read_ranking_file
from lightdock.pdbutil.PDBIO import parse_complex_from_file
from lightdock.structure.complex import Complex
# Configure ProDy output verbosity
confProDy(verbosity='info')
filtered_folder = 'filtered'
log = LoggingManager.get_logger('lgd_filter_membrane')
def get_structures(ranking, base_path='.'):
structures = []
for rank in ranking:
swarm_id = rank.id_cluster
glowworm_id = rank.id_glowworm
structures.append(os.path.join(base_path,
'swarm_{}'.format(swarm_id),
'lightdock_{}.pdb'.format(glowworm_id)))
return structures
def get_restraints(restraints_file):
restraints_receptor = set()
restraints_ligand = set()
with open(restraints_file) as handle:
for line in handle:
line = line.rstrip(os.linesep)
if line:
if line.startswith('R'):
restraints_receptor.add(line.split()[-1])
if line.startswith('L'):
restraints_ligand.add(line.split()[-1])
return restraints_receptor, restraints_ligand
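# Example restraints file accepted by get_restraints() (added for illustration;
# the layout is inferred from the parsing above and from
# calculate_membrane_height(), which expects "chain.resname.resnum" as the last
# token -- the residue identifiers themselves are made up):
#
#   R A.LYS.46
#   R A.TRP.50
#   L B.GLU.12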
def calculate_membrane_height(parsed_receptor_file, restraints):
atoms, residues, chains = parse_complex_from_file(parsed_receptor_file)
receptor = Complex(chains, atoms)
z_coord = []
for restraint in restraints:
chain_id, residue_name, residue_number = restraint.split(".")
residue = receptor.get_residue(chain_id, residue_name, residue_number)
ca = residue.get_calpha()
z_coord.append(ca.z)
return min(z_coord)
def parse_command_line():
"""Parses command line arguments"""
    parser = argparse.ArgumentParser(prog='lgd_filter_membrane')
parser.add_argument("ranking_file", help="Path of ranking to be used", metavar="ranking_file")
parser.add_argument("restraints_file", help="File including restraints", metavar="restraints_file")
parser.add_argument("parsed_receptor_file", help="Receptor PDB parsed by LightDock", metavar="parsed_receptor_file")
parser.add_argument("receptor_chains", help="Chains on the receptor partner", metavar="receptor_chains")
parser.add_argument("ligand_chains", help="Chains on the receptor partner", metavar="ligand_chains")
parser.add_argument("--cutoff", "-cutoff", "-c", help="Interaction cutoff",
dest="cutoff", type=float, default=1.0)
return parser.parse_args()
if __name__ == '__main__':
# Parse command line
args = parse_command_line()
log.info("Cutoff for membrane is {:3.1f}A".format(args.cutoff))
# Get ranking
ranking = read_ranking_file(args.ranking_file)
# Get all the PDB structures in a given directory
base_path = os.path.abspath(os.path.dirname(args.ranking_file))
structures = get_structures(ranking, base_path)
restraints_receptor, restraints_ligand = get_restraints(args.restraints_file)
membrane_height_z = calculate_membrane_height(args.parsed_receptor_file, restraints_receptor)
if os.path.exists(filtered_folder):
raise SystemExit("Folder {} already exists".format(filtered_folder))
else:
os.makedirs(filtered_folder)
filter_passed = {}
percentages = {}
for pdb_file in structures:
try:
swarm_id = int(re.findall(r'swarm_\d+', pdb_file)[0].split('_')[-1])
glowworm_id = int(re.findall(r'lightdock_\d+', pdb_file)[0].split('_')[-1])
# Read molecule and split by receptor and ligand
molecule = parsePDB(pdb_file)
ca_ligand = molecule.select('protein and chain {} and calpha'.format(args.ligand_chains))
# Contacts on ligand side
out = 0
for ca in ca_ligand:
coord = ca.getCoords()
if coord[-1] >= membrane_height_z:
out += 1
perc = out / float(len(ca_ligand))
if perc >= args.cutoff:
percentages[(swarm_id, glowworm_id)] = perc
shutil.copyfile(pdb_file, os.path.join(filtered_folder, 'swarm_{}_{}.pdb'.format(swarm_id, glowworm_id)))
try:
filter_passed[swarm_id].append(glowworm_id)
            except KeyError:
filter_passed[swarm_id] = [glowworm_id]
print("{:40s} {:5.3f}".format(pdb_file, perc))
    except Exception as e:
log.error('Filtering has failed for structure {}. Please see error:'.format(pdb_file))
log.error(str(e))
filtered_ranking = os.path.join(filtered_folder, 'rank_filtered.list')
with open(filtered_ranking, 'w') as handle:
for rank in ranking:
            if rank.id_cluster in filter_passed and rank.id_glowworm in filter_passed[rank.id_cluster]:
handle.write('swarm_{}_{}.pdb {:5.3f} {:5.3f}'.format(rank.id_cluster,
rank.id_glowworm, rank.scoring, percentages[(rank.id_cluster, rank.id_glowworm)]) + os.linesep)
|
gdestuynder/MozDef
|
mozdef_util/mozdef_util/query_models/exists_match.py
|
Python
|
mpl-2.0
| 369
| 0
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from elasticsearch_dsl import Q
def ExistsMatch(field_name):
    return Q('exists', field=field_name)
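# Minimal usage sketch (added comment; relies only on the elasticsearch_dsl Q
# object imported above -- the field name is illustrative):
#
#     query = ExistsMatch('details.sourceipaddress')
#     query.to_dict()  # -> {'exists': {'field': 'details.sourceipaddress'}}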
|
wonderbeyond/ezlog
|
pages/context_processors.py
|
Python
|
bsd-2-clause
| 150
| 0.013333
|
# coding=utf-8
from pages.models import Page
def nav_pages(request):
    return {'nav_pages': Page.objects.filter(public=True, in_navigation=True)}
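# Sketch of how a context processor like this is typically wired up (added
# comment; the exact settings name depends on the Django version this project
# targets):
#
#   TEMPLATE_CONTEXT_PROCESSORS += ("pages.context_processors.nav_pages",)
#
# after which templates can iterate over {{ nav_pages }} directly.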
|
linostar/timeline-clone
|
test/specs/db/DbOpen.py
|
Python
|
gpl-3.0
| 5,595
| 0.000357
|
# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import codecs
from specs.utils import a_category_with
from specs.utils import TmpDirTestCase
from timelinelib.calendar.gregorian import Gregorian
from timelinelib.db.exceptions import TimelineIOError
from timelinelib.db import db_open
from timelinelib.drawing.viewproperties import ViewProperties
import wx
CONTENT_010 = u"""
# Written by Timeline 0.1.0 on 2009-11-15 19:28:7
PREFERRED-PERIOD:2009-10-17 22:38:32;2009-12-2 16:22:4
CATEGORY:Category 1;188,129,224;True
CATEGORY:Category 2;255,165,0;True
CATEGORY:Category 3;173,216,230;False
EVENT:2009-11-4 22:52:0;2009-11-11 22:52:0;Event 1;Category 1
""".strip()
CONTENT_0100 = u"""
<?xml version="1.0" encoding="utf-8"?>
<timeline>
<version>0.10.0</version>
<categories>
<category>
<name>Category 1</name>
<color>188,129,224</color>
</category>
<category>
<name>Category 2</name>
<color>255,165,0</color>
<parent>Category 1</parent>
</category>
<category>
<name>Category 3</name>
      <color>173,216,230</color>
<parent>Category 2</parent>
</category>
</categories>
<events>
<event>
<start>2009-11-4 22:52:0</start>
<end>2009-11-11 22:52:0</end>
<text>Event 1</text>
<category>Category 1</category>
<description>The first event.</description>
</event>
</events>
<view>
<displayed_period>
<start>2009-10-17 22:38:32</start>
<end>2009-12-2 16:22:4</end>
</displayed_period>
<hidden_categories>
<name>Category 3</name>
</hidden_categories>
</view>
</timeline>
""".strip()
class DbOpenSpec(TmpDirTestCase):
def test_raises_error_when_reading_non_xml_file(self):
self.writeContentToTmpFile(CONTENT_010)
try:
db_open(self.tmp_path)
except TimelineIOError, e:
self.assertTrue("old file with a new version" in str(e))
def testRead0100File(self):
self.writeContentToTmpFile(CONTENT_0100)
db = db_open(self.tmp_path)
# Assert event correctly loaded
events = db.get_all_events()
self.assertEqual(len(events), 1)
event = events[0]
self.assertTrue(event.has_id())
self.assertEqual(event.get_text(), "Event 1")
self.assertEqual(event.get_time_period().start_time,
Gregorian(2009, 11, 4, 22, 52, 0).to_time())
self.assertEqual(event.get_time_period().end_time,
Gregorian(2009, 11, 11, 22, 52, 0).to_time())
self.assertEqual(event.get_category().get_name(), "Category 1")
self.assertEqual(event.get_data("description"), "The first event.")
self.assertEqual(event.get_data("icon"), None)
# Assert that correct view properties are loaded (category visibility
# checked later)
vp = ViewProperties()
db.load_view_properties(vp)
self.assertEqual(vp.displayed_period.start_time,
Gregorian(2009, 10, 17, 22, 38, 32).to_time())
self.assertEqual(vp.displayed_period.end_time,
Gregorian(2009, 12, 2, 16, 22, 4).to_time())
# Assert categories correctly loaded
categories = db.get_categories()
self.assertEqual(len(categories), 3)
for cat in categories:
self.assertTrue(cat.has_id())
if cat.get_name() == "Category 1":
self.assertEqual(cat.get_color(), (188, 129, 224))
self.assertTrue(vp.is_category_visible(cat))
self.assertEqual(cat.get_parent(), None)
elif cat.get_name() == "Category 2":
self.assertEqual(cat.get_color(), (255, 165, 0))
self.assertTrue(vp.is_category_visible(cat))
self.assertEqual(cat.get_parent().get_name(), "Category 1")
elif cat.get_name() == "Category 3":
self.assertEqual(cat.get_color(), (173, 216, 230))
self.assertFalse(vp.is_category_visible(cat))
self.assertEqual(cat.get_parent().get_name(), "Category 2")
else:
self.fail("Unknown category.")
def test_creates_new_xml_file(self):
new_db = db_open(self.tmp_path)
new_db.save_category(a_category_with(name="work"))
re_read_db = db_open(self.tmp_path)
self.assertEqual(len(re_read_db.get_categories()), 1)
self.assertEqual(re_read_db.get_categories()[0].get_name(), "work")
def setUp(self):
TmpDirTestCase.setUp(self)
self.tmp_path = self.get_tmp_path("test.timeline")
def writeContentToTmpFile(self, content):
f = codecs.open(self.tmp_path, "w", "utf-8")
f.write(content)
f.close()
|
c0deforfun/LLL
|
ui/resources_rc.py
|
Python
|
mit
| 138,528
| 0.000036
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sun Feb 8 12:30:31 2015
# by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x0f\x0a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5c\x72\xa8\x66\
\x00\x00\x0e\xd1\x49\x44\x41\x54\x78\xda\xed\x9d\x0b\xb0\x55\x55\
\x19\xc7\xe1\xf2\x94\xfb\x38\x7b\x9f\x0b\x5c\x10\x14\x49\x66\xa4\
\x10\x19\x51\x92\x4c\xc0\x88\x14\xe3\x21\xc8\x63\x50\x98\x72\x84\
\x51\xb2\x04\x72\x18\x04\x04\x1d\x42\x12\xd3\x42\x2d\x15\x0d\xb2\
\xc1\x34\x2d\x04\x95\x28\xdf\x51\x34\xc5\x14\x64\x8c\xa2\xa8\x40\
\x11\x84\x88\x10\xa9\xa8\x14\x0f\xfb\x16\x9c\x0b\x2a\x08\xf7\xdc\
\xbb\xd7\xb7\xd7\x5a\xfb\xf7\x9b\xf9\xa6\xa9\xb8\xf7\xee\xf5\xad\
\xff\xff\x7f\xf6\xde\x67\xef\xb5\xea\xd5\x03\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9f\x89\xe3\x38\x27\x9c\x15\
\x45\xd1\xc0\x5c\x1c\x8f\x8d\xa2\xfc\x4c\xf9\xcf\x7b\xa5\x66\x57\
\x44\xd1\x35\xb9\x5c\x7e\x48\x79\x3e\xff\x85\xd2\x16\x2d\x5a\xd1\
\x2d\x80\x30\x4c\x7f\xf2\x01\x73\x47\xf9\x67\x73\x51\xbc\x47\xea\
\xc3\x1a\xd6\x2a\xf9\xb9\xe9\xf2\xf3\x9d\xe9\x22\x80\x67\xc8\xa7\
\xf9\x50\x31\xfd\xca\x22\x0c\x7f\xac\x5a\x2f\x61\x30\x4e\x7e\x6d\
\x63\x3a\x0b\xe0\x30\x72\x7a\xdf\x53\x4e\xe9\x57\x24\x64\xfc\x23\
\x83\x20\x8e\x47\xc8\x9f\xa9\x4f\xa7\xc1\xdd\x4f\xbf\x28\x3e\x45\
\xea\x12\xa9\x91\x52\x9f\x95\x2a\x09\x7d\xcc\xe6\xba\x3d\x8a\xe2\
\x25\x96\x8c\xff\x89\xca\xaf\x94\xa0\xe9\x12\xa8\x76\x4a\xa4\x3a\
\x4a\x5d\x2a\x35\x44\xaa\x3d\x8e\xf2\x63\xe2\xba\x49\x3d\x29\xb5\
\xe3\x28\xa2\xdd\x25\xb5\x5c\xaa\x7f\x98\xa7\xfb\xb9\xb3\x65\x6c\
\x9b\x75\xcc\xff\x91\x9e\xca\x65\x46\x40\xfa\xe9\x5f\xd0\xc8\xbb\
\x47\x19\xeb\x4e\xa9\x67\xa4\xba\xe3\x34\xf7\x26\xae\xa9\xd4\x2d\
\x52\x7b\x6b\x28\xdc\x07\xa4\xf2\xa1\x8c\x5f\x4e\xc9\x47\xe6\x72\
\xf1\x07\xca\xe6\xaf\xae\xfd\xf2\x4d\xc2\x0c\x9f\x2f\x09\x8c\x16\
\x0a\x9a\xa8\xc9\x78\xf7\x49\xdd\x66\x34\x87\xf3\xdc\x98\xbc\x53\
\xa5\xd6\xd6\x42\xb8\x5b\xa5\xfa\x78\x3f\xfe\x38\xbe\x3e\x25\xe3\
\x7f\xbc\xe2\xf8\x11\x39\x9c\x12\x0f\xf5\xd3\xa7\xa0\x85\x62\xc7\
\xfc\xaa\xd1\x1e\x0e\x4c\xff\x5a\x6d\x79\x1d\x84\xbb\x5b\xaa\xaf\
\xc7\x9f\xfc\x23\x9c\x30\xff\xe1\x10\x98\xed\x99\x7e\xfa\x16\x34\
\x50\xdb\x31\x2f\xcf\xc2\xbd\x25\x97\x27\x70\x7c\x02\xc2\xf5\x32\
\x04\x0a\xd7\xfc\xef\x3b\x15\x00\x52\xe6\x72\x24\x23\xe6\xaf\xae\
\x09\x38\x31\x9d\x09\xec\x20\xf5\x5e\x42\xc2\xf5\x2a\x04\x9a\x35\
\x6f\xde\x3a\x85\x1b\x7e\x35\x2b\xb9\x17\x51\x51\x51\xf9\xf9\x8c\
\x98\xff\xc3\x82\x06\x3b\xe0\x48\xfd\x49\x9c\x9e\xb0\x78\xbd\x09\
\x01\xf9\x0a\xee\x51\x27\xcd\x7f\xa8\xa2\x57\xe5\x30\x1b\x66\xc0\
\xfc\xd5\x35\x1d\x47\xea\x4f\xe4\x62\x0b\xe2\x75\x3e\x04\x2a\xf2\
\xf9\xee\x6e\x9b\xff\xd0\xfd\x80\xb1\x19\x31\xbf\xa9\xc5\x38\x52\
\x7f\x32\x37\x5a\x12\xaf\xd3\x21\x50\x11\xc5\xbf\xf7\x22\x00\x72\
\xf1\x1b\x55\x55\x55\xa5\x19\x30\xbf\xa9\x8d\x38\x52\x77\x32\x23\
\xcb\x02\x76\x32\x04\xe4\xa5\x9c\x01\x5e\x98\xbf\xfa\x86\xa0\xbc\
\x48\x94\x01\xf3\x57\x57\x84\x33\xf5\x26\xb4\x81\xc2\x1d\x70\xe7\
\x42\x40\x8e\x67\x99\x4f\x01\x20\xb5\x5d\x0e\xbb\x41\x06\xcc\x6f\
\xb4\xd8\x00\x67\xea\x4e\xec\x0a\x05\x01\x3b\x13\x02\xe5\xe5\xe5\
\x95\x45\x3c\xe9\xe8\x4c\xc9\xfb\x02\xbd\x02\x37\xbf\xa9\x15\x38\
\x52\x7f\x72\xef\x56\x12\xb1\x13\x21\x20\x46\xfa\xba\x6f\xe6\x3f\
\x78\x19\x90\x9f\x13\xb8\xf9\x4d\xdd\x8d\x23\xf5\x27\xf8\x32\x45\
\x21\xa7\x1e\x02\xf2\xd5\xdf\x22\x1f\x03\x40\x6a\x43\xe0\xe6\x37\
\x75\x19\x8e\xd4\x9f\xe4\xba\x3e\x06\xec\x53\x08\x34\x4a\xf0\xa1\
\x27\xf5\x2a\xcb\xe7\x3f\x17\xb0\xf9\x79\x1c\x38\xc5\x10\xe8\xa0\
\x6c\x8c\x54\x42\xc0\x2c\xe9\xe5\xab\xf9\x0b\xf7\x01\x06\x06\x6a\
\x7e\x9e\x02\x74\x20\x04\xc6\x2b\x0b\x5a\x3d\x04\xcc\x22\x9d\x3e\
\x07\x80\x3c\x14\xf4\x8d\x00\xcd\x6f\x6a\x3c\x0e\x4c\x3f\x00\xea\
\x2b\xde\x10\x4c\x25\x04\xcc\x4a\xbd\x7e\x9f\x01\xe4\x67\x06\x68\
\x7e\xa3\x39\x96\x45\x73\x28\x04\xee\x09\x35\x04\x0e\xae\xe8\xeb\
\xf1\x19\x40\x14\xcd\xc7\xfc\x40\x08\xd4\xfa\x2b\xc0\xfc\x0c\x9f\
\x03\x40\x1e\x5f\x7e\x1c\xf3\x03\x21\xc0\x19\x00\xe6\x07\x42\x80\
\x7b\x00\x98\x1f\x08\x81\x9a\x9f\x01\xf8\xf2\x0a\xb0\xc2\xab\xc1\
\x98\x1f\x32\x17\x02\x3c\x07\x80\xf9\x21\xdb\x21\xd0\xf0\x53\xd6\
\xaa\xf7\xa2\xca\x2b\x2b\x3b\x62\x7e\x48\x33\x04\xe6\xfa\x1e\x02\
\x72\x23\x6d\xa1\xa7\x01\xb0\x0e\xf3\x03\x21\x50\xd7\xfb\x00\x71\
\x3c\xca\xd3\x6f\x00\x6e\xc3\xfc\x40\x08\xd4\xf9\x9b\x80\x5c\x5c\
\xe4\xd6\xde\x4e\x94\xdc\xbf\xe8\x81\xf9\x81\x10\x48\xe4\xf8\xf3\
\xcf\x79\x16\x00\xdb\xea\xd5\x72\xb7\x20\xcc\x0f\x84\xc0\x91\x97\
\x01\x17\x79\xf6\xf5\xdf\x14\xcc\x0f\x84\x40\x82\x21\x20\xbf\xe3\
\x79\x4f\x02\x60\x73\xdb\xb6\x6d\x4f\xc0\xfc\x40\x08\x24\x18\x02\
\x85\x2d\xc1\xf6\xbb\xbf\x2c\x78\x7e\x34\xe6\x07\x42\xc0\x46\x08\
\xc8\x4e\xbc\x6e\x3f\xf8\x13\xaf\xa9\x57\xe4\x6a\xc0\x98\x1f\xd2\
\x0c\x81\x7b\x7d\x0a\x81\xb2\xb2\xb2\x96\x16\x37\x46\xa9\xfb\x2a\
\x39\xb9\x5c\x57\xcc\x0f\x84\x80\xc5\x10\x90\xc7\x6b\xbb\xc8\xcf\
\xef\x72\xcc\xfc\xfb\xe5\xd4\x7f\x18\xe6\x07\x42\x40\x21\x04\x0a\
\x6f\x09\xee\x77\xe7\xd4\x3f\x3f\x03\xf3\x03\x21\xa0\x1b\x02\x13\
\x5d\x08\x01\xb9\xee\x5f\x20\x87\x53\x1f\xf3\x03\x21\xa0\x1f\x02\
\xc3\x52\x5c\x36\x7c\xbf\x2c\x58\x32\x8d\x4f\x7e\x20\x04\x52\x0d\
\x81\x5c\x57\xf9\xf9\x4d\xca\xc7\xbc\x4b\xee\x45\x0c\xc6\xfc\x40\
\x08\x38\x10\x02\xa5\x2d\x5a\xb4\x92\x53\xf1\x25\x3a\xc7\x9a\x5f\
\x69\x6e\x44\x62\x7e\x08\x3d\x04\xee\xf3\xed\x39\x01\x31\x66\x4f\
\x79\x56\xc0\xd6\xe6\xa9\xeb\xe5\x91\xe4\x11\xc5\x5c\xef\x63\x7e\
\x20\x04\xd2\x78\x6c\x38\x97\x1f\x6a\x3e\xa9\x13\x33\x7e\x14\x8d\
\x93\x5f\xdb\xb8\x16\x3d\xc4\xfc\x40\x08\xa4\xf5\x16\xa1\x59\x52\
\xec\xe0\xca\xc2\xf9\x67\x8b\x7c\xa5\x78\x95\xfc\xdc\x74\xf9\xf9\
\xce\x75\xe8\x1d\xe6\x07\x42\xc0\x95\x95\x85\xc4\xcc\x39\x73\xc3\
\x50\x4e\xe3\xfb\xc9\x19\xc2\x18\x73\x07\x5f\xbe\xbf\xbf\xe3\xc0\
\xde\x03\xb2\x85\x97\x5c\x3e\x0c\xaa\xa8\xa8\x3c\xc7\xdc\x4f\x48\
\xa0\x67\x98\x1f\x08\x01\xdf\xb7\x26\xc7\xfc\x00\x84\x00\xe6\x07\
\x20\x04\x30\x3f\x80\xdf\xdf\x0e\x44\x71\x0b\x29\xb3\xe5\xf8\x20\
\xa9\xab\xa4\x6e\x90\xfa\x9e\xd4\x75\x52\x63\x0a\xff\xfb\x69\xc5\
\x9a\x0a\xf3\x43\x16\x43\xe0\xc7\xae\x87\x80\xfc\x7b\xb3\xaf\x40\
\x0f\xa9\x59\xe6\xce\x7e\x11\xef\x0e\xec\x90\x5a\x2a\x35\x49\xaa\
\x2d\xe6\x07\xf0\x28\x04\xe4\xdf\x54\x48\x4d\x91\x7a\x33\x81\xbf\
\xb9\x4f\xea\x49\xa9\xe1\x52\x25\x98\x1f\xc0\xd1\x10\x90\xff\xbd\
\x52\x6a\xa6\xd4\x4e\x4b\x7f\xfb\x6f\x52\xbd\x31\x3f\x80\x63\x21\
\x20\xff\xfd\xa2\x84\x3e\xf1\x6b\x52\x4f\x63\x7e\x00\x07\x42\x40\
\xaa\x89\xd4\x1c\x2f\x16\x0c\xc5\xfc\x40\x08\x24\x1e\x02\x
|
6b\x02\
\x36\x3e\xe6\x07\x42\x20\xc3\x85\xf9\xc1\xcb\x10\x98\x87\x79\x31\
\x3f\x10\x02\x18\x19\xf3\x03\x21\x40\x61\x7e\x20\x04\x28\xcc\x0f\
\x84\x00\x85\xf9\x81\x10\xa0\x30\x3f\x10\x02\x98\x1f\xf3\x03\x21\
\x80\xf9\x01\x32\x13\x02\xf3\x31\x3f\xe6\
|
x07\x42\x00\xf3\x03\x10\
\x02\x98\x1f\x80\x10\xc0\xfc\x00\x84\x00\xe6\x07\x20\x04\x30\x3f\
\x00\x21\xe0\x7d\x6d\xc2\xfc\x00\x35\x0f\x81\x1f\x05\x16\x00\xdb\
\x99\x59\x80\xe2\x82\xa0\x9f\xd4\x96\x40\x02\x60\x0f\x33\x0a\x50\
\x7c\x08\xc4\x52\xbf\x0d\x24\x04\x9a\x30\xa3\x00\xc5\x87\xc0\xec\
\x40\x02\xa0\x29\xb3\x09\x40\x00\x00\x00\x01\x00\x00\x04\x00\x00\
\x10\x00\x00\x40\x00\x00\x00\x01\x00\x00\x04\x00\x00\x01\x40\x00\
\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\
\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\
\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\
\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\
\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\
\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\
\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\
\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\
\x00\x00\x40\x00\x00\x00\x01\x00\x00\x04\x00\x00\x10\x00\x00\x40\
\x00\x00\x00\x01\x00\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\
\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\
\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\
\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x80\x5f\
\x01\x70\x53\x20\x01\xd0\x98\xd9\x04\x28\x3e\x00\xae\x0e\xc0\xfc\
\xfb\x98\x49\x80\xda\x05\x40\xbf\x00\x02\xe0\x3d\x66\x12\
|
faridborbar/01Tarea
|
Codigo.py
|
Python
|
mit
| 4,518
| 0.016383
|
# Here we solve the assignment problems
import time
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as const
from astropy import units as un
from scipy import integrate
# FIRST PART
# Load the data and define arrays for the wavelength and the flux
Datos = np.loadtxt("sun_AM0.dat")
Longitud = Datos[:,0]
Flujo = Datos[:,1]
# declare the units and convert them
UniLongitud = Longitud*un.nm # wavelength in nanometres
UniFlujo = Flujo*un.W*(un.m**-2)*(un.nm**-1) # flux in watts per square metre per nanometre
Longitud_um= UniLongitud.to('um') # convert to [um]
Flujo_cgs= UniFlujo.to('erg/(s cm2 um)') # convert to [cgs]
plt.clf()
plt.plot(Longitud_um, Flujo_cgs)
plt.xlim(0,8)
plt.xlabel('Wavelength [$ \mu m $]')
plt.ylabel('Energy flux [$ erg / s * cm^2 * \mu m$]')
plt.title('Energy flux as a function of the incident wavelength')
plt.savefig('Grafico1.png', bbox_inches='tight')
#plt.show()
# SECOND PART: we have to integrate the function above;
# we will do it using the trapezoid method seen in class
m = len(Longitud_um)
n = len(Flujo_cgs)
CSolar=0
Ttrapecio=time.time() # wrap the calculation with timers to see how long it takes
for i in range(n-1):
paso = (Longitud_um[i+1] - Longitud_um[i])
trapecio = ((Flujo_cgs[i+1] + Flujo_cgs[i]) * paso /2)
    CSolar += trapecio # the accumulated area corresponds to the solar constant
Ttrapecio = time.time()-Ttrapecio # store in a variable how long our own method takes
# In parallel we use python's built-in method to compute the same integral
TcompTrapecio = time.time() # start the timer for python's method
ConstanteComparacionT1 = np.trapz(Flujo_cgs , Longitud_um)
TcompTrapecio = time.time()-TcompTrapecio # stop the timer
print 'solar constants with our own trapezoid method and with python, respectively'
print (CSolar)
print(ConstanteComparacionT1)
# THIRD PART
# We want to compute the Sun's energy flux through a unit of surface in the solar atmosphere per unit of time
CantiInter = input('Enter the number of intervals for the integration (maximum 100)')
Salto = (np.pi/2-0.01)/CantiInter # distance between the panels to integrate
Intervalo = np.arange(0.01, np.pi/2, Salto) # discrete interval to integrate; we cannot start at 0 so we choose 0.01
Paso = Intervalo[1] - Intervalo[0] # the spacing between interval elements is constant, so any step will do
AreaS = 0
T = 5778*un.K
Constantes = ((2*np.pi*const.h)/((const.c)**2)) * ((const.k_B*T)/(const.h))**4 # constants in front of the integral
Tamano = len(Intervalo)
TSimpson=time.time() # start the timer for the Simpson method
def Integral(y): # define the integrand as a function to simplify the calculations
    funcion = (np.tan(y)**3 + np.tan(y)**5) / ((np.exp(np.tan(y)))-1)
    return funcion
# Now we iterate to integrate over the elements [k] of the interval evaluated in the function
for k in range(0, (Tamano-2)):
simpson = (Paso/6.0)*((Integral(Intervalo[k])) + 4*Integral(Intervalo[k+1]) + Integral(Intervalo[k+2]))
AreaS += simpson
FlujoSolar = Constantes*AreaS # the constants times the computed area (the integral)
TSimpson= time.time() - TSimpson # stop the timer
# And now we use python's Quad method for comparison
TCompSimpson=time.time() # start the timer
FlujoCom = integrate.quad(Integral, 0, np.pi/2)[0] # quad returns (value, abs_error); keep the value only
FlujoCom = FlujoCom * Constantes
TCompSimpson=time.time() - TCompSimpson # stop the timer
print 'solar fluxes, computed with the Simpson method and with Quad respectively'
print FlujoSolar
print FlujoCom
# Now we compute the Sun's radius from the energy flux through a section of the Earth's atmosphere and the constant a0.
a0= const.au
CSolar= CSolar.to('J /(m2 s)') # change the units of the solar constant so they match
# The radius is given by the square root of the ratio between the solar constant and the flux, multiplied by the constant a0
Radio = (np.sqrt((CSolar / FlujoSolar)))*a0
print 'Radius'
print Radio
print ' Time taken by the integrations we performed, with their respective comparisons'
print 'Solar constant (trapezoid method)'
print Ttrapecio
print TcompTrapecio
print 'Solar flux (Simpson and Quad methods)'
print TSimpson
print TCompSimpson
|
slarosa/QGIS
|
python/plugins/sextante/lidar/lastools/lassplit.py
|
Python
|
gpl-2.0
| 2,455
| 0.001222
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
lassplit.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
    ***************************************************************************
    *                                                                         *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from sextante.lidar.lastools.LasToolsUtils import LasToolsUtils
from sextante.lidar.lastools.LasToolsAlgorithm import LasToolsAlgorithm
from sextante.parameters.ParameterFile import ParameterFile
from sextante.outputs.OutputFile import OutputFile
from sextante.parameters.ParameterNumber import ParameterNumber
class lassplit(LasToolsAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
NUM_POINTS = "NUM_POINTS"
def defineCharacteristics(self):
self.name = "lassplit"
self.group = "Tools"
self.addParameter(ParameterFile(lassplit.INPUT, "Input las layer"))
        self.addParameter(ParameterNumber(lassplit.NUM_POINTS, "Points in each output file", 1, None, 1000000))
self.addOutput(OutputFile(lassplit.OUTPUT, "Output las file basename"))
self.addCommonParameters()
def processAlgorithm(self, progress):
commands = [os.path.join(LasToolsUtils.LasToolsPath(), "bin", "lassplit.exe")]
commands.append("-i")
commands.append(self.getParameterValue(lassplit.INPUT))
commands.append("-o")
commands.append(self.getOutputValue(lassplit.OUTPUT))
commands.append("-split")
commands.append(self.getParameterValue(lassplit.NUM_POINTS))
self.addCommonParameterValuesToCommand(commands)
LasToolsUtils.runLasTools(commands, progress)
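        # For illustration (added comment): with the default NUM_POINTS value
        # the assembled command list resembles
        #   [".../bin/lassplit.exe", "-i", "<input.las>",
        #    "-o", "<output_basename>", "-split", 1000000]
        # before the common SEXTANTE parameters are appended.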
|
Staffjoy/client_python
|
staffjoy/resources/manager.py
|
Python
|
mit
| 206
| 0.004854
|
from staffjoy.resource import Resource
class Manager(Resource):
"""Location managers"""
PATH = "organizations/{organization_id}/locations/{location_id}/managers/{user_id}"
|
ID_NAME = "user_id"
|
Azure/azure-sdk-for-python
|
sdk/healthbot/azure-mgmt-healthbot/azure/mgmt/healthbot/__init__.py
|
Python
|
mit
| 680
| 0.002941
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._healthbot import Healthbot
from ._version import VERSION
__version__ = VERSION
__all__ = ['Healthbot']
try:
from ._patch import patch_sdk # type: ignore
patch_sdk()
except ImportError:
pass
|
loicseguin/grades
|
grades/__init__.py
|
Python
|
bsd-3-clause
| 878
| 0.001142
|
#-*- coding: utf-8 -*-
"""\
======
Grades
======
For managing student grades, most teachers use spreadsheet tools. With these
tools, it is hard to maintain grades in plain text files that are easily
readable by humans. The goal of **Grades** is to let teachers manage their
students' grades in a plain text file while providing tools to parse the file
and calculate student and group means.
The table format that **Grades** use is the one Emacs `org-mode
<http://orgmode.org/index.html>`_ uses. Using org-mode, grades tables can be
easily set up and then **Grades** will happily compute all the required values.
"""
from __future__ import print_function # For Python 2 compatibility.
__author__ = "Loïc Séguin-C. <loicseguin@gmail.com>"
__license__ = "BSD"
__version__ = "0.3dev"
from . import gradestable
from . import parsers
from . import ui
from . import writers
|
txenoo/django-radio
|
radioco/apps/radioco/management/commands/create_example_data.py
|
Python
|
gpl-3.0
| 212
| 0
|
from django.core.management.base import BaseCommand
from radioco.apps.radioco.utils import create_example_data
class Command(BaseCommand):
def handle(self, *args, **options):
create_example_data()
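# Added note: as a Django management command this is typically invoked as
#   python manage.py create_example_data
# (the command name comes from this module's file name).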
|
SOM-st/PySOM
|
tests/rtruffle_tests/test_node.py
|
Python
|
mit
| 2,292
| 0
|
import unittest
from rtruffle.node import Node
class NodeTest(unittest.TestCase):
    def test_adopt_child(self):
child = ChildNode()
parent = RootNode()
self.assertIsNone(child.parent)
parent.adopt_child(child)
self.assertIs(parent, child.parent)
def test_adopt_children(self):
        children = [ChildNode() for _ in range(0, 10)]
parent = RootNode()
self.assertIsNot(children[0], children[1])
for child in children:
self.assertIsNone(child.parent)
parent.adopt_children(children)
for child in children:
self.assertIs(parent, child.parent)
def test_replace_1(self):
child1 = ChildNode()
parent = RootNode(child1, None)
self.assertIs(child1, parent.child_node1)
self.assertIsNone(parent.child_node2)
child2 = ChildNode()
child1.replace(child2)
self.assertIs(child2, parent.child_node1)
self.assertIsNone(parent.child_node2)
def test_replace_2(self):
child1 = ChildNode()
parent = RootNode(None, child1)
self.assertIsNone(parent.child_node1)
self.assertIs(child1, parent.child_node2)
child2 = ChildNode()
child1.replace(child2)
self.assertIsNone(parent.child_node1)
self.assertIs(child2, parent.child_node2)
def test_replace_in_children(self):
child1 = ChildNode()
child2 = ChildNode()
parent = RootNodeWithChildList([child1, child1, child1])
for each in parent.child_nodes:
self.assertIs(each, child1)
child1.replace(child2)
for each in parent.child_nodes:
self.assertIs(each, child2)
class RootNode(Node):
_child_nodes_ = ["child_node1", "child_node2"]
def __init__(self, child_node1=None, child_node2=None):
Node.__init__(self)
self.child_node1 = self.adopt_child(child_node1)
self.child_node2 = self.adopt_child(child_node2)
class RootNodeWithChildList(Node):
_child_nodes_ = ["child_nodes[*]"]
def __init__(self, child_nodes=None):
Node.__init__(self)
assert isinstance(child_nodes, list)
self.child_nodes = self.adopt_children(child_nodes)
class ChildNode(Node):
pass
|
jpetto/olympia
|
src/olympia/access/tests.py
|
Python
|
bsd-3-clause
| 9,223
| 0
|
from django.http import HttpRequest
import mock
import pytest
from nose.tools import assert_false
from olympia import amo
from olympia.amo.tests import TestCase, req_factory_factory
from olympia.amo.urlresolvers import reverse
from olympia.addons.models import Addon, AddonUser
from olympia.users.models import UserProfile
from .acl import (action_allowed, check_addon_ownership, check_ownership,
check_addons_reviewer, check_personas_reviewer,
check_unlisted_addons_reviewer, is_editor, match_rules)
pytestmark = pytest.mark.django_db
def test_match_rules():
"""
Unit tests for the match_rules method.
"""
rules = (
'*:*',
'Editors:*,Admin:EditAnyAddon,Admin:flagged,Admin:addons,'
'Admin:EditAnyCollection',
'Tests:*,Admin:serverstatus,Admin:users',
'Admin:EditAnyAddon,Admin:EditAnyLocale,Editors:*,'
'Admin:lists,Admin:applications,Admin:addons,Localizers:*',
'Admin:EditAnyAddon',
'Admin:ViewAnyStats,Admin:ViewAnyCollectionStats',
'Admin:ViewAnyStats',
'Editors:*,Admin:features',
'Admin:Statistics',
'Admin:Features,Editors:*',
'Admin:%',
'Admin:*',
'Admin:Foo',
'Admin:Bar',
)
for rule in rules:
assert match_rules(rule, 'Admin', '%'), "%s != Admin:%%" % rule
rules = (
'Doctors:*',
'Stats:View',
'CollectionStats:View',
'Addons:Review',
'Personas:Review',
'Locales:Edit',
'Locale.de:Edit',
'Reviews:Edit',
'None:None',
)
for rule in rules:
assert not match_rules(rule, 'Admin', '%'), \
"%s == Admin:%% and shouldn't" % rule
def test_anonymous_user():
# Fake request must not have .groups, just like an anonymous user.
fake_request = HttpRequest()
assert_false(action_allowed(fake_request, amo.FIREFOX, 'Admin:%'))
class ACLTestCase(TestCase):
"""Test some basic ACLs by going to various locked pages on AMO."""
fixtures = ['access/login.json']
def test_admin_login_anon(self):
# Login form for anonymous user on the admin page.
url = '/en-US/admin/models/'
r = self.client.get(url)
self.assert3xx(r, '%s?to=%s' % (reverse('users.login'), url))
class TestHasPerm(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestHasPerm, self).setUp()
assert self.client.login(username='del@icio.us', password='password')
self.user = UserProfile.objects.get(email='del@icio.us')
self.addon = Addon.objects.get(id=3615)
self.au = AddonUser.objects.get(addon=self.addon, user=self.user)
assert self.au.role == amo.AUTHOR_ROLE_OWNER
self.request = self.fake_request_with_user(self.user)
def fake_request_with_user(self, user):
request = mock.Mock()
request.groups = user.groups.all()
request.user = user
request.user.is_authenticated = mock.Mock(return_value=True)
return request
def login_admin(self):
assert self.client.login(username='admin@mozilla.com',
password='password')
return UserProfile.objects.get(email='admin@mozilla.com')
def test_anonymous(self):
self.request.user.is_authenticated.return_value = False
self.client.logout()
assert not check_addon_ownership(self.request, self.addon)
def test_admin(self):
self.request = self.fake_request_with_user(self.login_admin())
assert check_addon_ownership(self.request, self.addon)
assert check_addon_ownership(self.request, self.addon, admin=True)
assert not check_addon_ownership(self.request, self.addon, admin=False)
def test_require_author(self):
assert check_ownership(self.request, self.addon, require_author=True)
def test_require_author_when_admin(self):
self.request = self.fake_request_with_user(self.login_admin())
self.request.groups = self.request.user.groups.all()
assert check_ownership(self.request, self.addon, require_author=False)
assert not check_ownership(self.request, self.addon,
require_author=True)
def test_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert not check_addon_ownership(self.request, self.addon)
self.test_admin()
def test_deleted(self):
self.addon.update(status=amo.STATUS_DELETED)
assert not check_addon_ownership(self.request, self.addon)
self.request.user = self.login_admin()
self.request.groups = self.request.user.groups.all()
assert not check_addon_ownership(self.request, self.addon)
def test_ignore_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert check_addon_ownership(self.request, self.addon,
ignore_disabled=True)
def test_owner(self):
assert check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
def test_dev(self):
assert check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert not check_addon_ownership(self.request, self.addon, dev=True)
def test_viewer(self):
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
def test_support(self):
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert not check_addon_ownership(self.request, self.addon,
support=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon,
support=True)
        self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert check_addon_ownership(self.request, self.addon, support=True)
class TestCheckReviewer(TestCase):
fixtures = ['base/addon_3615', 'addons/persona']
def setUp(self):
super(TestCheckReviewer, self).setUp()
self.user = UserProfile.objects.get()
        self.persona = Addon.objects.get(pk=15663)
self.addon = Addon.objects.get(pk=3615)
def test_no_perm(self):
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_perm_addons(self):
self.grant_permission(self.user, 'Addons:Review')
req = req_factory_factory('noop', user=self.user)
assert check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_perm_themes(self):
self.grant_permission(self.user, 'Personas:Review')
        req = req_factory_factory('noop', user=self.user)
|
quattor/aquilon
|
lib/aquilon/worker/commands/compile_hostname.py
|
Python
|
apache-2.0
| 3,029
| 0.00033
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008-2013,2015-2016,2019 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq compile`."""
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.host import hostname_to_host
from aquilon.worker.templates import Plenary, TemplateDomain
class CommandCompileHostname(BrokerCommand):
required_parameters = ["hostname"]
requires_readonly = True
def render_old(self, session, logger, hostname, pancinclude, pancexclude,
                   pancdebug, cleandeps, **_):
dbhost = hostname_to_host(session, hostname)
if pancdebug:
pancinclude = r'.*'
pancexclude = r'components/spma/functions.*'
dom = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
logger=logger)
plenary = Plenary.get_plenary(dbhost, logger=logger)
with plenary.get_key():
dom.compile(session, only=plenary.object_templates,
panc_debug_include=pancinclude,
panc_debug_exclude=pancexclude,
cleandeps=cleandeps)
return
def render(self, session, logger, hostname,
pancinclude, pancexclude, pancdebug, cleandeps, **_):
template_domain, plenary = self._preprocess(session, logger, hostname)
if pancdebug:
pancinclude = r'.*'
pancexclude = r'components/spma/functions.*'
self._compile_template_domain(session, template_domain, plenary,
pancinclude, pancexclude, cleandeps)
@staticmethod
def _preprocess(session, logger, hostname):
dbhost = hostname_to_host(session, hostname)
template_domain = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
logger=logger)
plenary = Plenary.get_plenary(dbhost, logger=logger)
return template_domain, plenary
@staticmethod
def _compile_template_domain(session, template_domain, plenary,
pancinclude, pancexclude, cleandeps):
with plenary.get_key():
template_domain.compile(session, only=plenary.object_templates,
panc_debug_include=pancinclude,
panc_debug_exclude=pancexclude,
cleandeps=cleandeps)
|
spacecoalmen/asteroid_scraper
|
asteroid_scraper/finder.py
|
Python
|
mit
| 4,400
| 0.002273
|
from numpy import NaN
from pandas import DataFrame
from asteroid_scraper.math.qfunction import Qfunction
from asteroid_scraper.math.tisserand import tisserand
from asteroid_scraper.utils.progress_bar import ProgressBarThread
class AsteroidFinder(object):
__shared_state = {}
client = None
def _borg_init(self, min_semiax, max_semiax, min_ecce, max_ecce, min_incl, max_incl, sort_key):
self.min_semiax = min_semiax
self.max_semiax = max_semiax
self.min_ecce = min_ecce
self.max_ecce = max_ecce
self.min_incl = min_incl
self.max_incl = max_incl
self.sort_key = sort_key
def __init__(self, min_semiax, max_semiax, min_ecce, max_ecce, min_incl, max_incl, sort_key):
self.__dict__ = self.__shared_state
if not self.__shared_state:
self._borg_init(min_semiax, max_semiax, min_ecce, max_ecce, min_incl, max_incl, sort_key)
def find_asteroids(self, cycler_orbits, asteroids_orbits):
"""
        Return asteroid orbits that meet the filter requirements; each asteroid
        carries a Tisserand value and a Tisserand delta with the cycler orbit.
"""
pb = ProgressBarThread("Computing tisserand")
pb.start()
asteroids_orbits['tisserand'] = asteroids_orbits.apply(lambda row: tisserand(row), axis=1)
cycler_orbits['tisserand'] = cycler_orbits.apply(lambda row: tisserand(row), axis=1)
pb.stop()
pb = ProgressBarThread("Computing Qfunction")
pb.start()
asteroids_orbits['q_function'] = asteroids_orbits.apply(lambda row: Qfunction(row), axis=1)
cycler_orbits['q_function'] = cycler_orbits.apply(lambda row: Qfunction(row), axis=1)
pb.stop()
pb = ProgressBarThread("Scraping asteroids")
pb.start()
# from now, we treat data as dict data structure instead of pandas data frame,
# need more expertise with pandas API ;)
cycler_orbits = cycler_orbits.to_dict(orient="records"
|
)
asteroids_orbits = asteroids_orbits.to_dict(orient="records")
asteroids_orbits = self.filter_asteroids(asteroids_orbits)
results = []
for i, cycler_orbit in enumerate(cycler_orbits):
cycler_tisserand = cycler_orbit['tisserand']
cycler_q_function = cycler_orbit['q_function']
for orbit in asteroids_orbits:
                delta_tisserand = self._tisserand_delta(cycler_tisserand,
                                                        orbit['tisserand'])
delta_q_function = self._q_function_delta(cycler_q_function,
orbit['q_function'])
results.append({'asteroid_id': orbit['id'],
'asteroid_full_name': orbit['full_name'],
'asteroid_tisserand': orbit['tisserand'],
'asteroid_q_function': orbit['q_function'],
'cycler_orbit_index': i,
'cycler_orbit_tisserand': cycler_tisserand,
'cycler_orbit_q_function': cycler_q_function,
'delta_q_function': delta_q_function,
'delta_tisserand': delta_tisserand,
})
# back to pandas data frame data structure
results = DataFrame(results)
results = results.sort_values('delta_' + self.sort_key)
pb.stop()
return results
def filter_asteroids(self, asteroids_orbits):
results = []
        # should be filtered using the pandas API in order to achieve efficiency
for orbit in asteroids_orbits:
if self.min_semiax <= orbit['semiax'] <= self.max_semiax \
and self.min_ecce <= orbit['ecce'] <= self.max_ecce \
and self.min_incl <= orbit['incl'] <= self.max_incl:
results.append(orbit)
return results
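    # Vectorised alternative with the pandas API, as the comment above suggests
    # (added sketch only; assumes the orbits are still a DataFrame with
    # 'semiax', 'ecce' and 'incl' columns at that point):
    #
    #   mask = (orbits['semiax'].between(self.min_semiax, self.max_semiax)
    #           & orbits['ecce'].between(self.min_ecce, self.max_ecce)
    #           & orbits['incl'].between(self.min_incl, self.max_incl))
    #   return orbits[mask]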
def _tisserand_delta(self, t1, t2):
try:
return abs(t1 - t2)
except TypeError:
print 'error on tisserand_delta', t1, t2
return NaN
def _q_function_delta(self, t1, t2):
try:
return abs(t1 - t2)
except TypeError:
print 'error on q_function_delta', t1, t2
return NaN
|
matthewzhenggong/fiwt
|
XbeeZBS2Test/CommandWiFi.py
|
Python
|
lgpl-3.0
| 60,244
| 0.005577
|
#!/bin/env python
# -*- coding: utf-8 -*-
""" @package XBee Zigbee API Test Programme
Functions include:
 1) AT command;
 2) Remote AT command
 3) Send a single TX request with response at a constant frequency
 4) Send continuous TX requests with/without response at a constant frequency
 5) Flow rate prediction/measurement in Pps (Packets per Second)
    and bps (Bits per Second)
6) Echo RX response for range test
"""
import os
import time
import wx
import string
import logging
import sys
import traceback
import threading
import struct
import socket
from ConfigParser import SafeConfigParser
from butter import Butter
from wx.lib.newevent import NewEvent
import XBeeIPServices
import PayloadPackage as pp
RxEvent, EVT_RSLT1 = NewEvent()
Rx2Event, EVT_RSLT2 = NewEvent()
RxStaEvent, EVT_STAT = NewEvent()
LogEvent, EVT_LOG = NewEvent()
RxCmpEvent, EVT_RSLT1C = NewEvent()
Rx2CmpEvent, EVT_RSLT2C = NewEvent()
RxGndEvent, EVT_RSLT1G = NewEvent()
RxAirEvent, EVT_RSLT1AIR = NewEvent()
log = logging.getLogger(__name__)
def Get14bit(val) :
if val & 0x2000 :
return -(((val & 0x1FFF)^0x1FFF)+1)
else :
return val & 0x1FFF
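# Worked example for Get14bit (added comment): the argument is read as a 14-bit
# two's-complement value, so
#   Get14bit(0x1FFF) ->  8191   (largest positive value)
#   Get14bit(0x2000) -> -8192   (sign bit set, magnitude from the inverted bits)
#   Get14bit(0x3FFF) ->    -1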
at_status = {
0: 'OK',
1: 'ERROR',
2: 'Invalid Command',
3: 'Invalid Parameter',
4: 'Tx Failure',
}
moderm_status = {
0: 'Hardware reset',
1: 'Watchdog timer reset',
2: 'Joined network (routers and end devices)',
3: 'Disassociated',
6: 'Coordinator started',
7: 'Network security key was updated',
0x0D: 'Voltage supply limit exceeded (PRO S2B only)',
0x0E: 'Device Cloud connected',
0x0F: 'Device Cloud disconnected',
0x11: 'Modem configuration changed while join in progress',
0x80: 'stack error',
}
discovery_status = {
0x00: 'No Discovery Overhead',
0x01: 'Address Discovery',
0x02: 'Route Discovery',
0x03: 'Address and Route',
0x40: 'Extended Timeout Discovery',
}
delivery_status = {
0x00: 'Success',
0x01: 'MAC ACK Failure',
0x02: 'CCA Failure',
0x03: 'Transmission was purged because it was attempted before stack was completely up',
0x15: 'Invalid destination endpoint',
0x21: 'Network ACK Failure',
0x22: 'Not Joined to Network',
0x23: 'Self-addressed',
0x24: 'Address Not Found',
0x25: 'Route Not Found',
0x26: 'Broadcast source failed to hear a neighbor relay the message',
0x2B: 'Invalid binding table index',
0x2C: 'Resource error lack of free buffers, timers, etc.',
0x2D: 'Attempted broadcast with APS transmission',
0x2E: 'Attempted unicast with APS transmission, but EE=0',
0x32: 'Resource error lack of free buffers, timers, etc.',
0x74: 'Data payload too large',
0x76: 'Attempt to create a client socket fail',
0x77: 'TCP connection to given IP address and port doesn\'t exist',
0x78: 'Source port on a UDP transmission does not match a listening port on the transmitting module',
}
tx_status = {
0x00: 'Success',
0x01: 'No ACK received',
0x02: 'CCA failure',
0x03: 'Purged',
}
recv_opts = {
0x01: 'Packet Acknowledged',
0x02: 'Packet was a broadcast packet',
0x20: 'Packet encrypted with APS encryption',
0x21: 'Packet encrypted with APS encryption',
0x22: 'Broadcast packet encrypted with APS encryption',
0x40: 'Packet was sent from an end device',
0x41: 'Packet was sent from an end device',
0x42: 'Broadcast packet was sent from an end device',
0x61: 'APS-encrypted Packet was sent from an end device',
0x62: 'APS-encrypted Broadcast packet was sent from an end device',
}
ALPHA_ONLY = 1
DIGIT_ONLY = 2
HEX_ONLY = 3
class MyValidator(wx.PyValidator):
def __init__(self, flag=None, pyVar=None):
wx.PyValidator.__init__(self)
self.flag = flag
        self.Bind(wx.EVT_CHAR, self.OnChar)
self.hexs = string.digits + 'abcdefABCDEF'
def Clone(self):
return MyValidator(self.flag)
def Validate(self, win):
tc = self.GetWindow()
val = tc.GetValue()
if self.flag == ALPHA_ONLY:
return all([i in string.letters for i in val])
        elif self.flag == DIGIT_ONLY:
return all([i in string.digits for i in val])
elif self.flag == HEX_ONLY:
return all([i in self.hexs for i in val])
return True
def OnChar(self, event):
key = event.GetKeyCode()
if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255:
event.Skip()
return
if self.flag == HEX_ONLY and chr(key) in self.hexs:
event.Skip()
return
if self.flag == ALPHA_ONLY and chr(key) in string.letters:
event.Skip()
return
if self.flag == DIGIT_ONLY and chr(key) in string.digits:
event.Skip()
return
if self.flag == DIGIT_ONLY and chr(key) in '-':
event.Skip()
return
if self.flag == DIGIT_ONLY and chr(key) in '.':
event.Skip()
return
if not wx.Validator_IsSilent():
wx.Bell()
        # Returning without calling event.Skip() eats the event before it
# gets to the text control
return
class RedirectError(object):
def __init__(self):
pass
def write(self, string):
string = string.strip('\r\n\t ')
if string:
log.error(string)
class RedirectInfo(object):
def __init__(self):
pass
def write(self, string):
string = string.strip('\r\n\t ')
if string:
log.info(string)
class RedirectText(object):
def __init__(self, parent):
self.parent = parent
def write(self, string):
wx.PostEvent(self.parent, LogEvent(log=string))
class MyFrame(wx.Frame):
def __init__(self, parent, ID, title,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE):
wx.Frame.__init__(self, parent, ID, title, pos, size, style)
parser = SafeConfigParser()
parser.read('config.ini')
self.parser = parser
panel = wx.Panel(self, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
box = wx.BoxSizer(wx.HORIZONTAL)
self.btnStart = wx.Button(panel, -1, "Start", size=(100, -1))
box.Add(self.btnStart, 0, wx.ALIGN_CENTER, 5)
box.Add(wx.StaticText(panel, wx.ID_ANY, "Host:"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 1)
self.txtHost = wx.TextCtrl(panel, -1, parser.get('host','AP'), size=(100, -1))
box.Add(self.txtHost, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
self.btnBaseTime = wx.Button(panel, -1, "Set Base Time", size=(100, -1))
self.btnBaseTime.Enable(False)
box.Add(self.btnBaseTime, 0, wx.ALIGN_CENTER, 5)
self.txtRecName = wx.TextCtrl(panel, -1, parser.get('rec','prefix'), )
box.Add(self.txtRecName, 1, wx.ALIGN_CENTER|wx.LEFT, 5)
self.btnALLrec = wx.ToggleButton(panel, -1, "REC")
self.btnALLrec.Enable(False)
box.Add(self.btnALLrec, 0, wx.ALIGN_CENTER, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
AT_CMD = ['MY', 'MK', 'GW', 'SH', 'SL', 'DL', 'C0', 'ID', 'AH', 'MA',
'PL', 'BD', 'AI', 'WR', 'FR',]
HOST_LIST = ["192.168.191.2", "192.168.191.3", "192.168.191.4"]
self.PORT_LIST = ["2616", "2267", "2677", "2000"]
box = wx.BoxSizer(wx.HORIZONTAL)
self.target = 'GND'
self.rbGND = wx.RadioButton(panel, wx.ID_ANY, "GND:",
style=wx.RB_GROUP)
box.Add(self.rbGND, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 1)
self.txtGNDhost = wx.ComboBox(panel, -1, parser.get('host','GND'),
choices=HOST_LIST)
box.Add(self.txtGNDhost, 0, wx.ALIGN_CENTER, 5)
self.txtGNDport = wx.ComboBox(panel, -1, "2616",
choices=self.PORT_LIST[:-1], validator=MyValidator(HEX_ONLY))
box.Add(self.txtGNDport, 0, wx.ALIGN_CENTER, 5)
self.chkGNDsynct = wx.CheckBox(panel, -1, "")
self.chkGNDsynct.SetValue(True)
box.Add(self.chkGNDsynct, 0, wx.ALIGN_CE
|
funkyeah/tiddlyweb
|
test/other/tiddlyweb/serializations/debug.py
|
Python
|
bsd-3-clause
| 701
| 0.001427
|
"""
External serialization for testing remote module loading.
"""
from tiddlyweb.serializations import SerializationInterface
class Serialization(SerializationInterface):
def list_recipes(self, recipes):
print recipes
def list_bags(self, bags):
print bags
    def recipe_as(self, recipe):
print "r_as: %s" % recipe
def as_recipe(self, recipe, input):
print "as_r: %s" % input
def bag_as(self, bag):
print "b_as: %s" % bag
def as_bag(self, bag, input):
print "as_b: %s" % input
def tiddler_as(self, tiddler):
print "t_as: %s" % tiddler
def as_tiddler(self, tiddler, input):
print "as_t: %s" % input
|
magic0704/oslo.db
|
oslo_db/tests/sqlalchemy/test_models.py
|
Python
|
apache-2.0
| 5,509
| 0
|
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslotest import base as oslo_test
from sqlalchemy import Column
from sqlalchemy import Integer, String
from sqlalchemy.ext.declarative import declarative_base
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import test_base
BASE = declarative_base()
class ModelBaseTest(test_base.DbTestCase):
def setUp(self):
super(ModelBaseTest, self).setUp()
self.mb = models.ModelBase()
self.ekm = ExtraKeysModel()
def test_modelbase_has_dict_methods(self):
dict_methods = ('__getitem__',
'__setitem__',
'__contains__',
'get',
'update',
'save',
'items',
'iteritems',
'keys')
for method in dict_methods:
self.assertTrue(hasattr(models.ModelBase, method),
"Method %s() is not found" % method)
def test_modelbase_is_iterable(self):
self.assertTrue(issubclass(models.ModelBase, collections.Iterable))
def test_modelbase_set(self):
self.mb['world'] = 'hello'
self.assertEqual(self.mb['world'], 'hello')
def test_modelbase_update(self):
h = {'a': '1', 'b': '2'}
self.mb.update(h)
for key in h.keys():
self.assertEqual(self.mb[key], h[key])
def test_modelbase_contains(self):
mb = models.ModelBase()
h = {'a': '1', 'b': '2'}
mb.update(h)
for key in h.keys():
# Test 'in' syntax (instead of using .assertIn)
self.assertTrue(key in mb)
self.assertFalse('non-existent-key' in mb)
def test_modelbase_contains_exc(self):
class ErrorModel(models.ModelBase):
@property
def bug(self):
raise ValueError
model = ErrorModel()
model.update({'attr': 5})
self.assertTrue('attr' in model)
self.assertRaises(ValueError, lambda: 'bug' in model)
def test_modelbase_items_iteritems(self):
h = {'a': '1', 'b': '2'}
expected = {
'id': None,
'smth': None,
'name': 'NAME',
'a': '1',
'b': '2',
}
self.ekm.update(h)
self.assertEqual(dict(self.ekm.items()), expected)
self.assertEqual(dict(self.ekm.iteritems()), expected)
def test_modelbase_dict(self):
h = {'a': '1', 'b': '2'}
expected = {
'id': None,
'smth': None,
'name': 'NAME',
'a': '1',
'b': '2',
}
self.ekm.update(h)
self.assertEqual(dict(self.ekm), expected)
def test_modelbase_iter(self):
expected = {
'id': None,
'smth': None,
'name': 'NAME',
}
i = iter(self.ekm)
found_items = 0
while True:
r = next(i, None)
if r is None:
break
self.assertEqual(expected[r[0]], r[1])
found_items += 1
self.assertEqual(len(expected), found_items)
def test_modelbase_keys(self):
self.assertEqual(set(self.ekm.keys()),
set(('id', 'smth', 'name')))
self.ekm.update({'a': '1', 'b': '2'})
self.assertEqual(set(self.ekm.keys()),
set(('a', 'b', 'id', 'smth', 'name')))
def test_modelbase_several_iters(self):
mb = ExtraKeysModel()
it1 = iter(mb)
it2 = iter(mb)
self.assertFalse(it1 is it2)
self.assertEqual(dict(it1), dict(mb))
self.assertEqual(dict(it2), dict(mb))
def test_extra_keys_empty(self):
"""Test verifies that by default extra_keys return empty list."""
self.assertEqual(self.mb._extra_keys, [])
def test_extra_keys_defined(self):
"""Property _extra_keys will return list with attributes names."""
self.assertEqual(self.ekm._extra_keys, ['name'])
def test_model_with_extra_keys(self):
data = dict(self.ekm)
self.assertEqual(data, {'smth': None,
'id': None,
'name': 'NAME'})
class ExtraKeysModel(BASE, models.ModelBase):
__tablename__ = 'test_model'
id = Column(Integer, primary_key=True)
smth = Column(String(255))
@property
def name(self):
return 'NAME'
@property
def _extra_keys(self):
return ['name']
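# A minimal usage sketch (an illustrative addition, not part of the original
# oslo.db test module): ExtraKeysModel combines a declarative table with
# ModelBase's dict-style access, and _extra_keys makes the computed 'name'
# property show up in iteration and containment checks.
def _example_extra_keys_usage():
    ekm = ExtraKeysModel()
    ekm['smth'] = 'value'          # __setitem__ comes from ModelBase
    as_dict = dict(ekm)            # {'id': None, 'smth': 'value', 'name': 'NAME'}
    return as_dict, 'name' in ekm  # the extra key is visible to __contains__ too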
class TimestampMixinTest(oslo_test.BaseTestCase):
def test_timestampmixin_attr(self):
methods = ('created_at',
'updated_at')
for method in methods:
self.assertTrue(hasattr(models.TimestampMixin, method),
"Method %s() is not found" % method)
|
navcoindev/navcoin-core
|
qa/rpc-tests/walletbackup.py
|
Python
|
mit
| 7,262
| 0.001515
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. Nodes 1, 2, and 3 send transactions between each other;
the fourth node is a miner.
Nodes 1, 2, and 3 each mine a block to start, then the
miner creates 100 blocks so nodes 1, 2, and 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
Nodes 1/2/3 are shut down, and their wallets erased.
Then restore using the wallet.dat backup, and
confirm 1/2/3/4 balances are the same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework.test_framework import NavCoinTestFramework
from test_framework.util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
class WalletBackupTest(NavCoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
# nodes 1, 2,3 are spenders, let's give them a keypool=100
extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].generate(1)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.da
|
t")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
|
brain-research/mirage-rl-bpttv
|
baselines/common/distributions.py
|
Python
|
mit
| 10,920
| 0.009707
|
import tensorflow as tf
import numpy as np
import baselines.common.tf_util as U
from tensorflow.python.ops import math_ops
class Pd(object):
"""
A particular probability distribution
"""
def flatparam(self):
raise NotImplementedError
def mode(self):
raise NotImplementedError
def neglogp(self, x):
# Usually it's easier to define the negative logprob
raise NotImplementedError
def kl(self, other):
raise NotImplementedError
def entropy(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def logp(self, x):
return - self.neglogp(x)
class PdType(object):
"""
Parametrized family of probability distributions
"""
def pdclass(self):
raise NotImplementedError
def pdfromflat(self, flat):
return self.pdclass()(flat)
def param_shape(self):
raise NotImplementedError
def sample_shape(self):
raise NotImplementedError
def sample_dtype(self):
raise NotImplementedError
def param_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name)
def sample_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name)
class CategoricalPdType(PdType):
def __init__(self, ncat):
self.ncat = ncat
def pdclass(self):
return CategoricalPd
def param_shape(self):
return [self.ncat]
def sample_shape(self):
return []
def sample_dtype(self):
return tf.int32
class MultiCategoricalPdType(PdType):
def __init__(self, low, high):
self.low = low
self.high = high
self.ncats = high - low + 1
def pdclass(self):
return MultiCategoricalPd
def pdfromflat(self, flat):
return MultiCategoricalPd(self.low, self.high, flat)
def param_shape(self):
return [sum(self.ncats)]
def sample_shape(self):
return [len(self.ncats)]
def sample_dtype(self):
return tf.int32
class DiagGaussianPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return DiagGaussianPd
def param_shape(self):
return [2*self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.float32
class BernoulliPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return BernoulliPd
def param_shape(self):
return [self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.int32
# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
# def __init__(self, logits):
# self.logits = logits
# self.ps = tf.nn.softmax(logits)
# @classmethod
# def fromflat(cls, flat):
# return cls(flat)
# def flatparam(self):
# return self.logits
# def mode(self):
# return U.argmax(self.logits, axis=-1)
# def logp(self, x):
# return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
# def kl(self, other):
# return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
# - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def entropy(self):
# return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def sample(self):
# u = tf.random_uniform(tf.shape(self.logits))
# return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
class CategoricalPd(Pd):
def __init__(self, logits):
self.logits = logits
def flatparam(self):
return self.logits
def mode(self):
return U.argmax(self.logits, axis=-1)
def neglogp(self, x):
# return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
# Note: we can't use sparse_softmax_cross_entropy_with_logits because
# the implementation does not allow second-order derivatives...
one_hot_actions = tf.one_hot(x, self.logits.get_shape().as_list()[-1])
return tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits,
labels=one_hot_actions)
def kl(self, other):
a0 = self.logits - U.max(self.logits, axis=-1, keepdims=True)
a1 = other.logits - U.max(other.logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = U.sum(ea0, axis=-1, keepdims=True)
z1 = U.sum(ea1, axis=-1, keepdims=True)
p0 = ea0 / z0
return U.sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=-1)
def entropy(self):
a0 = self.logits - U.max(self.logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
z0 = U.sum(ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
return U.sum(p0 * (tf.log(z0) - a0), axis=-1)
def sample(self):
u = tf.random_uniform(tf.shape(self.logits))
return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
@classmethod
def fromflat(cls, flat):
return cls(flat)
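# A minimal usage sketch (an illustrative addition, not part of the original
# baselines module): CategoricalPdType builds the placeholders and the
# CategoricalPd over `ncat` discrete actions, assuming the TF1-style graph
# mode used throughout this file.
def _example_categorical_pd_usage(ncat=4):
    pdtype = CategoricalPdType(ncat)
    logits = pdtype.param_placeholder(prepend_shape=[None])  # shape [None, ncat]
    pd = pdtype.pdfromflat(logits)
    action = pd.sample()          # Gumbel-max sample, one action per batch row
    neglogp = pd.neglogp(action)  # dense xent, see the second-derivative note above
    return action, neglogp, pd.entropy()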
class MultiCategoricalPd(Pd):
def __init__(self, low, high, flat):
self.flat = flat
self.low = tf.constant(low, dtype=tf.int32)
self.categoricals = list(map(CategoricalPd, tf.split(flat, high - low + 1, axis=len(flat.get_shape()) - 1)))
def flatparam(self):
return self.flat
def mode(self):
return self.low + tf.cast(tf.stack([p.mode() for p in self.categoricals], axis=-1), tf.int32)
def neglogp(self, x):
return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x - self.low, axis=len(x.get_shape()) - 1))])
def kl(self, other):
return tf.add_n([
p.kl(q) for p, q in zip(self.categoricals, other.categoricals)
])
def entropy(self):
return tf.add_n([p.entropy() for p in self.categoricals])
def sample(self):
return self.low + tf.cast(tf.stack([p.sample() for p in self.categoricals], axis=-1), tf.int32)
@classmethod
def fromflat(cls, flat):
raise NotImplementedError
class DiagGaussianPd(Pd):
def __init__(self, flat):
self.flat = flat
mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat)
self.mean = mean
self.logstd = logstd
self.std = tf.exp(logstd)
def flatparam(self):
return self.flat
def mode(self):
return self.mean
def neglogp(self, x):
return 0.5 * U.sum(tf.square((x - self.mean) / self.std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ U.sum(self.logstd, axis=-1)
def kl(self, other):
assert isinstance(other, DiagGaussianPd)
return U.sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1)
def entropy(self):
return U.sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)
def sample(self):
return self.mean + self.std * tf.random_normal(tf.shape(self.mean))
@classmethod
def fromflat(cls, flat):
return cls(flat)
class BernoulliPd(Pd):
def __init__(self, logits):
self.logits = logits
self.ps = tf.sigmoid(logits)
def flatparam(self):
return self.logits
def mode(self):
return tf.round(self.ps)
def neglogp(self, x):
return U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=tf.to_float(x)), axis=-1)
def kl(self, other):
return U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits, labels=self.ps), axis=-1) - U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)
def entropy(self):
return U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)
def sample(self):
u = tf.random_uniform(tf.shape(self.ps))
        return tf.to_float(math_ops.less(u, self.ps))
|
aacebedo/cloudflarednsupdater
|
environments/build/build.py
|
Python
|
lgpl-3.0
| 9,511
| 0.013984
|
#!/usr/bin/env python3
# This file is part of CFDNSUpdater.
#
# CFDNSUpdater is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CFDNSUpdater is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CFDNSUpdater. If not, see <http://www.gnu.org/licenses/>.
import json
import argparse
import subprocess
import sys
import os
import uuid
import time
import shutil
from subprocess import Popen, PIPE
from errno import ENAMETOOLONG
def parseArguments(raw_args):
parser = argparse.ArgumentParser(prog="build",
description='Project Builder')
rootSubparsers = parser.add_subparsers(dest="function")
buildParser = rootSubparsers.add_parser('build', help='Build packages')
buildParser.add_argument('-project', '-p', required=True,
help="Github project", type=str)
buildParser.add_argument('-arch', '-a', required=True,
help='Architecture to build', type=str)
buildParser.add_argument('--branch', '-b', help='Git branch to build',
default="master", type=str)
buildParser.add_argument('-binname', '-bn', required=True,
help='binname', type=str)
buildParser.add_argument('--outputdirpath', '-o', help='Output directory',
required=True, type=str)
deployDescParser = rootSubparsers.add_parser('deploydesc',
help='Create deployement \
descriptor')
deployDescParser.add_argument('--branch', '-b', help='Git branch to build',
required=True, type=str)
deployDescParser.add_argument('-binname', '-bn', required=True,
help='binname', type=str)
deployDescParser.add_argument('-user', '-u', required=True,
help='User', type=str)
deployDescParser.add_argument('-description', '-dc', required=True,
help='Package description', type=str)
deployDescParser.add_argument('--outputdirpath', '-o',
                                help='Output directory',
required=True, type=str)
deployDescParser.add_argument('--licenses', '-li', help='Software licences',
default=[], type=str, action='append')
deployDescParser.add_argument('--labels', '-la', help='Package labels',
action='append',
default=[], type=str)
return parser.parse_args(raw_args)
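# Example invocations of the two sub-commands above (illustrative values,
# not taken from the project's documentation):
#   build.py build -p someuser/someproject -a amd64 -bn somebin -o /tmp/out
#   build.py deploydesc -b master -bn somebin -u someuser -dc "A package" -o /tmp/out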
def generateTmpDir():
  tmp_dir_path = None
for x in range(0, 5):
tmp_dir_path = os.path.join(os.path.abspath(os.sep), "tmp", str(uuid.uuid4()))
if not os.path.exists(tmp_dir_path) :
os.makedirs(tmp_dir_path, exist_ok=True)
break
else:
tmp_dir_path = None
if tmp_dir_path == None:
raise Exception("Unable to generate a tmp direcctory")
return tmp_dir_path
def generatePackage(build_dir_path,
package_type, package_name, version, arch, project):
process = subprocess.Popen(["fpm", "-t", package_type,
"-n", package_name,
"-p", build_dir_path,
"-a", arch,
"-f",
"--url","https://www.github.com/{}".format(project),
"-v", version.replace("/", "_"),
"-C", os.path.join(build_dir_path, "packaging"),
"-s", "dir", "."], shell=False)
process.communicate()
if process.returncode != 0:
os.exit("Error while cloning project")
def build(build_dir_path, project, branch, arch, bin_name):
if len(os.listdir(build_dir_path)) != 0:
raise Exception("Build error: {} is not empty.".format(build_dir_path))
go_dir_path = os.path.join(generateTmpDir(), "go")
print("Go path is : {}".format(go_dir_path))
src_dir_path = os.path.join(go_dir_path, 'src', "github.com", project)
process = None
process = subprocess.Popen(["git", "clone", "-b", branch,
"https://github.com/{}".format(project),
src_dir_path], shell=False)
process.communicate()
if process.returncode != 0:
os.exit("Error while cloning project")
process = subprocess.Popen(["go", "get", "-d", "./..."],
cwd=src_dir_path, shell=False,
env=dict(os.environ,
GOARCH=arch,
GOPATH=go_dir_path,
CGO_ENABLED="0"))
process.communicate()
if process.returncode != 0:
sys.exit("Error while getting dependencies project")
process = subprocess.Popen(["go", "install", "./..."],
cwd=src_dir_path, shell=False,
env=dict(os.environ,
GOARCH=arch,
GOPATH=go_dir_path,
CGO_ENABLED="0"))
process.communicate()
if process.returncode != 0:
os.exit("Error while build the project")
bin_dir_path = os.path.join(build_dir_path, "packaging",
"usr", "local", "bin")
os.makedirs(bin_dir_path)
for dirName, _, fileList in os.walk(os.path.join(go_dir_path, "bin")):
for fname in fileList:
shutil.copy2(os.path.join(dirName, fname),
os.path.join(bin_dir_path, fname))
if os.path.exists(os.path.join(src_dir_path, "resources")) :
for name in os.listdir(os.path.join(src_dir_path, "resources")):
shutil.copytree(os.path.join(src_dir_path, "resources", name),
os.path.join(build_dir_path, "packaging", name))
def generateBintrayDescriptor(build_dir,
bin_name,
user,
desc,
version,
licenses=[],
labels=[]):
github_addr = "https://github.com/{}/{}".format(user,bin_name)
descriptor = {"package":{
"name":bin_name,
"repo":bin_name,
"subject":user,
"desc":desc,
"website_url":github_addr,
"issue_tracker_url":github_addr,
"vcs_url":github_addr,
"github_use_tag_release_notes":True,
"licenses":licenses,
"labels":labels,
"public_download_numebrs":False,
"public_stats":False
},
"version":{
"name":version,
"desc":desc,
"released":time.strftime("%Y-%m-%d"),
"vcs_tag":version,
"gpgSign":False
},
"files":[],
"publish":True
}
for distrib in os.listdir(build_dir):
if os.path.isdir(os.path.join(build_dir,distrib)):
for arch in os.listdir(os.path.join(build_dir,distrib)):
if os.path.isdir(os.path.join(build_dir,distrib,arch)) :
descriptor["files"].append({
"includePattern": os.path.join(build_dir,
distrib,
arch,
"(.*\.deb)"),
"uploadPattern": os.path.join(distrib
|
acsone/account-invoicing
|
account_invoice_rounding/__init__.py
|
Python
|
agpl-3.0
| 1,027
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
#    This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import partner
from . import company
from . import res_config
from . import account
|
fyhtea/squeezeDet-hand
|
src/config/voc_squeezeDet_config.py
|
Python
|
bsd-2-clause
| 1,990
| 0.037186
|
"""Model configuration for pascal dataset"""
import numpy as np
from config import base_model_config
def voc_squeezeDet_config():
"""Specify the parameters to tune below."""
mc = base_model_config('PASCAL_VOC')
mc.IMAGE_WIDTH = 320
mc.IMAGE_HEIGHT = 240
mc.BATCH_SIZE = 5
mc.WEIGHT_DECAY = 0.0001
mc.LEARNING_RATE = 0.001
mc.DECAY_STEPS = 10000
mc.MAX_GRAD_NORM = 1.0
mc.MOMENTUM = 0.9
mc.LR_DECAY_FACTOR = 0.5
mc.LOSS_COEF_BBOX = 5.0
mc.LOSS_COEF_CONF_POS = 75.0
mc.LOSS_COEF_CONF_NEG = 100.0
mc.LOSS_COEF_CLASS = 2.0
mc.PLOT_PROB_THRESH = 0.7 #0.4
mc.NMS_THRESH = 0.4
mc.PROB_THRESH = 0.01 #0.005
mc.TOP_N_DETECTION = 12 #64
mc.DATA_AUGMENTATION = True
mc.DRIFT_X = 150
mc.DRIFT_Y = 100
mc.EXCLUDE_HARD_EXAMPLES = False
mc.ANCHOR_BOX = set_anchors(mc)
mc.ANCHORS = len(mc.ANCHOR_BOX)
  mc.ANCHOR_PER_GRID = 9
return mc
def set_anchors(mc):
H, W, B = 14, 19, 9
anchor_shapes = np.reshape(
[np.array(
[[ 36., 37.], [ 366., 174.], [ 115., 59.],
[ 162., 87.], [ 38., 90.], [ 258., 173.],
[ 224., 108.], [ 78., 170.], [ 72., 43.]])] * H * W,
(H, W, B, 2)
)
center_x = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B),
(B, H, W)
),
(1, 2, 0)
),
(H, W, B, 1)
)
center_y = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),
(B, W, H)
),
(2, 1, 0)
),
(H, W, B, 1)
)
anchors = np.reshape(
np.concatenate((center_x, center_y, anchor_shapes), axis=3),
(-1, 4)
)
return anchors
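# A small sanity-check sketch (an illustrative addition, not part of the
# original config): with H, W, B = 14, 19, 9 the grid yields
# 14 * 19 * 9 = 2394 anchors, so mc.ANCHOR_BOX has shape (2394, 4) with one
# (center_x, center_y, anchor shape) row per anchor.
def _example_anchor_count():
  mc = voc_squeezeDet_config()
  assert mc.ANCHOR_BOX.shape == (2394, 4)
  assert mc.ANCHORS == 2394
  return mc.ANCHOR_BOX[:2]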
|
fearlessspider/python-social-auth
|
examples/webpy_example/app.py
|
Python
|
bsd-3-clause
| 3,421
| 0.002631
|
import sys
sys.path.append('../..')
import web
from web.contrib.template import render_jinja
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from social.utils import setting_name
from social.apps.webpy_app.utils import psa, backends
from social.apps.webpy_app import app as social_app
import local_settings
web.config.debug = False
web.config[setting_name('USER_MODEL')] = 'models.User'
web.config[setting_name('AUTHENTICATION_BACKENDS')] = (
'social.backends.open_id.OpenIdAuth',
'social.backends.google.GoogleOpenId',
'social.backends.google.GoogleOAuth2',
'social.backends.google.GoogleOAuth',
'social.backends.twitter.TwitterOAuth',
'social.backends.yahoo.YahooOpenId',
'social.backends.stripe.StripeOAuth2',
'social.backends.persona.PersonaAuth',
'social.backends.facebook.FacebookOAuth2',
'social.backends.facebook.FacebookAppOAuth2',
'social.backends.yahoo.YahooOAuth',
'social.backends.angel.AngelOAuth2',
'social.backends.behance.BehanceOAuth2',
'social.backends.bitbucket.BitbucketOAuth',
'social.backends.box.BoxOAuth2',
'social.backends.linkedin.LinkedinOAuth',
'social.backends.github.GithubOAuth2',
'social.backends.foursquare.FoursquareOAuth2',
'social.backends.instagram.InstagramOAuth2',
'social.backends.live.LiveOAuth2',
'social.backends.vk.VKOAuth2',
'social.backends.dailymotion.DailymotionOAuth2',
'social.backends.disqus.DisqusOAuth2',
'social.backends.dropbox.DropboxOAuth',
'social.backends.eveonline.EVEOnlineOAuth2',
'social.backends.evernote.EvernoteSandboxOAuth',
'social.backends.fitbit.FitbitOAuth2',
'social.backends.flickr.FlickrOAuth',
'social.backends.livejournal.LiveJournalOpenId',
    'social.backends.soundcloud.SoundcloudOAuth2',
'social.backends.thisismyjam.ThisIsMyJamOAuth1',
'social.backends.stocktwits.StocktwitsOAuth2',
'social.backends.tripit.TripItOAuth',
    'social.backends.clef.ClefOAuth2',
'social.backends.twilio.TwilioAuth',
'social.backends.xing.XingOAuth',
'social.backends.yandex.YandexOAuth2',
'social.backends.podio.PodioOAuth2',
'social.backends.mineid.MineIDOAuth2',
'social.backends.wunderlist.WunderlistOAuth2',
'social.backends.upwork.UpworkOAuth',
)
web.config[setting_name('LOGIN_REDIRECT_URL')] = '/done/'
urls = (
'^/$', 'main',
'^/done/$', 'done',
'', social_app.app_social
)
render = render_jinja('templates/')
class main(object):
def GET(self):
return render.home()
class done(social_app.BaseViewClass):
def GET(self):
user = self.get_current_user()
return render.done(user=user, backends=backends(user))
engine = create_engine('sqlite:///test.db', echo=True)
def load_sqla(handler):
web.ctx.orm = scoped_session(sessionmaker(bind=engine))
try:
return handler()
except web.HTTPError:
web.ctx.orm.commit()
raise
except:
web.ctx.orm.rollback()
raise
finally:
web.ctx.orm.commit()
# web.ctx.orm.expunge_all()
Session = sessionmaker(bind=engine)
Session.configure(bind=engine)
app = web.application(urls, locals())
app.add_processor(load_sqla)
session = web.session.Session(app, web.session.DiskStore('sessions'))
web.db_session = Session()
web.web_session = session
if __name__ == "__main__":
app.run()
|
wcmitchell/insights-core
|
insights/core/filters.py
|
Python
|
apache-2.0
| 2,205
| 0.000454
|
import os
import pkgutil
import re
import six
import yaml as ser
from collections import defaultdict
import insights
from insights.core import dr
# TODO: consider case insensitive and regex
FILTERS = defaultdict(set)
def add_filter(name, patterns):
if isinstance(patterns, six.string_types):
FILTERS[name].add(patterns)
elif isinstance(patterns, list):
        FILTERS[name] |= set(patterns)
elif isinstance(patterns, set):
FILTERS[name] |= patterns
else:
raise TypeError("patterns must be string, list, or set.")
def get_filters(component):
filters = set()
if component in FILTERS:
filters |= FILTERS[component]
alias = dr.get_alias(component)
if alias and alias in FILTERS:
filters |= FILTERS[alias]
return filters
def apply_filters(target, lines):
results = []
for l in lines:
for f in FILTERS[target]:
if re.search(f, l):
results.append(l)
return results
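# A minimal usage sketch (an illustrative addition, not from the insights-core
# docs): register patterns for a made-up component name, then keep only the
# lines that match any of them.
def _example_filter_usage():
    add_filter('example_component', ['error', 'warning'])  # hypothetical name
    lines = ['error: disk full', 'all good', 'warning: low memory']
    return apply_filters('example_component', lines)  # the two matching lines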
_filename = ".".join(["filters", ser.__name__])
_dumps = ser.dump
_loads = ser.safe_load
def loads(string):
"""Loads the filters dictionary given a string."""
d = _loads(string)
for k, v in d.items():
FILTERS[dr.get_component(k) or k] = set(v)
def load(stream=None):
"""
Loads filters from a stream, normally an open file. If one is
not passed, filters are loaded from a default location within
the project.
"""
if stream:
loads(stream.read())
else:
data = pkgutil.get_data(insights.__name__, _filename)
return loads(data) if data else None
def dumps():
"""Returns a string representation of the FILTERS dictionary."""
d = {}
for k, v in FILTERS.items():
d[dr.get_name(k)] = list(v)
return _dumps(d)
def dump(stream=None):
"""
Dumps a string representation of `FILTERS` to a stream, normally an
open file. If none is passed, `FILTERS` is dumped to a default location
within the project.
"""
if stream:
stream.write(dumps())
else:
path = os.path.join(os.path.dirname(insights.__file__), _filename)
        with open(path, "w") as f:
f.write(dumps())
|
daumann/chronas-application
|
umap/templates/leaflet_storage/leaflet_storage_tags.py
|
Python
|
mit
| 2,204
| 0.000907
|
from django.utils import simplejson
from django import template
from django.conf import settings
from ..models import DataLayer, TileLayer
from ..views import _urls_for_js
register = template.Library()
@register.inclusion_tag('leaflet_storage/css2.html')
def leaflet_storage_css():
return {
"STATIC_URL": settings.STATIC_URL
}
@register.inclusion_tag('leaflet_storage/js.html')
def leaflet_storage_js(locale=None):
return {
"STATIC_URL": settings.STATIC_URL,
"locale": locale
}
@register.inclusion_tag('leaflet_storage/map_fragment.html')
def map_fragment(map_instance, **kwargs):
layers = DataLayer.objects.filter(map=map_instance)
datalayer_data = [c.metadata for c in layers]
tilelayers = TileLayer.get_list() # TODO: no need to all
map_settings = map_instance.settings
if not "properties" in map_settings:
map_settings['properties'] = {}
map_settings['properties'].update({
'tilelayers': tilelayers,
'datalayers': datalayer_data,
        'urls': _urls_for_js(),
'STATIC_URL': settings.STATIC_URL,
"allowEdit": False,
'hash': False,
'attributionControl': False,
'scrollWheelZoom': False,
'datalayersControl': False,
'zoomControl': False,
'storageAttributionControl': False,
'moreControl': False,
'scaleControl': False,
'miniMap': False,
'storage_id': map_instance.pk,
'onLoadPanel': "none",
'captionBar': False,
'default_iconUrl': "%sstorage/src/img/marker.png" % settings.STATIC_URL,
'slideshow': {}
})
map_settings['properties'].update(kwargs)
return {
"map_settings": simplejson.dumps(map_settings),
"map": map_instance
}
@register.simple_tag
def tilelayer_preview(tilelayer):
"""
Return an <img> tag with a tile of the tilelayer.
"""
output = '<img src="{src}" alt="{alt}" title="{title}" />'
url = tilelayer.url_template.format(s="a", z=9, x=265, y=181)
output = output.format(src=url, alt=tilelayer.name, title=tilelayer.name)
return output
@register.filter
def notag(s):
    return s.replace('<', '&lt;')
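# Template-side usage sketch (illustrative; assumes this module is loadable
# as a template tag library under the name "leaflet_storage_tags"):
#   {% load leaflet_storage_tags %}
#   {% leaflet_storage_css %}
#   {% leaflet_storage_js "en" %}
#   {% map_fragment map_instance allowEdit=False %}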
|
gangadhar-kadam/sapphire_app
|
patches/july_2013/p01_remove_doctype_mappers.py
|
Python
|
agpl-3.0
| 516
| 0.017442
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
webnotes.conn.sql("""drop table if exists `tabDocType Mapper`""")
webnotes.conn.sql("""drop table if exists `tabTable Mapper Detail`""")
webnotes.conn.sql("""drop table if ex
|
ists `tabField Mapper Detail`""")
webnotes.delete_doc("DocType", "DocType Map
|
per")
webnotes.delete_doc("DocType", "Table Mapper Detail")
webnotes.delete_doc("DocType", "Field Mapper Detail")
|
sobi-wan/helmet
|
recipes-example/helmet-rootfs/helmet-rootfs-1.0.0/home/root/josh.py
|
Python
|
mit
| 4,911
| 0.038689
|
#!/usr/bin/python
import subprocess
import os
import time
import sys
import threading
import signal
from upload import ftp_open, upload
import gpio
from gpio import setup,mode,read,set,cleanup
led_red=91
led_amber=90
led_green=65
button_switch=95
def updateIndicators(stop_event):
blinker=0
while not stop_event.wait(0.1):
v=read(button_switch)
#print "button-switch", v
if v: ##disabled the blinker## and blinker<15:
set(led_red,0)
else:
set(led_red,1)
blinker=blinker+1
if blinker >= 20:
blinker=0
print 'updateIndicators thread has terminated.'
csi1_video='/dev/' + str(sys.argv[1])
print 'ipu0_csi1 @', csi1_video
# count the restarts due to errors; this value affects the filename (see the sinkfile definition for details)
restarts = 0
# index numbers in filename
idx_nums = 3
def touch(fname):
try:
os.utime(fname, None)
except OSError:
open(fname, 'a').close()
def delold():
try:
index_fossil = os.path.getmtime("trilobite")
except: ## there was an error reading/accessing file trilobite, for now we just return
pass
return
for root, dirs, files in os.walk("."):
for name in files:
try:
## delete file if older than X seconds compared to trilobite
if index_fossil > (os.path.getmtime(name)+1000):
#print "del", os.path.join(root,name), ", T:", os.path.getmtime(name)
os.remove( os.path.join(root,name) )
except:
pass
def button_status():
changed=True
old_v=-1
while True: # this will never exit (unless there is an error, maybe?)
v=read(95)
changed=(old_v!=v)
yield (changed,v)
old_v=v
b = button_status()
# v = next(b)
def button():
v = next(b)
#print 'button', v
return v
os.chdir("/media/")
def getGstCmd():
myfile = "usbcam{0}.mkv"
sinkfile = myfile.format( "{1}%0{0}d".format(idx_nums, chr(ord('A')+(restarts%26)) ) )
print "This is my file format:", sinkfile
maxsize=4*1024*1024
gstcmd_csi = (
"gst-launch-1.0 -v -e "
"imxv4l2videosrc capture-mode=1 device={2} ! "
"imxvpuenc_h264 quant-param=20 ! "
"h264parse ! matroskamux ! "
"multifilesink location={0} next-file=4 "
"max-file-size={1}".format(sinkfile,maxsize,csi1_video)
)
gstcmd = (
"gst-launch-1.0 -v -e "
"v4l2src device={2} num-buffers=-1 ! "
"videoconvert ! "
"video/x-raw,format=I420,width=640,height=360,framerate=10/1 ! "
"imxvpuenc_h264 quant-param=20 ! "
"multifilesink post-messages=1 location={0} next-file=4 "
"max-file-size={1}".format(sinkfile,maxsize,"/dev/video1")
)
print "cmd:", gstcmd_csi
return gstcmd_csi
def main():
try:
retval = subprocess.call(getGstCmd(), shell=True)
if retval < 0:
print >>sys.stderr, "Child was terminated by signal", -retval
else:
print >>sys.stderr, "Child returned", retval
except ValueError as e:
print "execution failed:", e
except OSError as e:
print "OS error:", e
except subprocess.CalledProcessError as e:
print "Called process error:", e
except KeyboardInterrupt:
print "user interrupted with ctrl-C"
except:
print "error."
finally:
print "adios!"
def josh():
    event_counter=0
while button() != (False,1):
time.sleep(0.5)
touch("trilobite")
try:
gstproc = subprocess.Popen(getGstCmd(), shell=True)
except ValueError as e:
print "value error:", e
except OSError as e:
print "OS error:", e
except subprocess.CalledProcessError as e:
print "called process error:", e
finally:
print "Popen finished."
while gstproc.poll() is None:
time.sleep(1)
if button()==(True,0):
break
#print ".",
event_counter = event_counter + 1
if event_counter > 10:
event_counter=0
delold()
touch("trilobite")
#print "T"
time.sleep(2)
#gstproc.wait(5)
### when gstproc fails with returncode == 255, it has indicated the video source
### may be incorrect; instead of /dev/video0 (default) it could be /dev/video1, etc.
print "gst process finished, rc=", gstproc.returncode
#gstproc.kill() #terminate()
os.kill(gstproc.pid, signal.SIGINT)
print 'signal.SIGINT:', signal.SIGINT
if __name__ == "__main__":
print "starting josh.py..."
pill2kill = threading.Event()
ioThread = threading.Thread(target=updateIndicators, args=(pill2kill,))
ioThread.start()
while True:
try:
josh()
except KeyboardInterrupt:
pass
break
restarts = restarts + 1
print "...restarting({0}) gst recorder...".format( restarts )
pill2kill.set()
cleanup(led_red)
cleanup(button_switch)
print "Gst Recording script has terminated."
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/d10d998b796b_remove_rule_description_from_rulesql_.py
|
Python
|
cc0-1.0
| 914
| 0.009847
|
"""Remove rule_description from RuleSql table
Revision ID: d10d998b796b
Revises: 5f1470603fa0
Create Date: 2018-03-20 13:46:14.180715
"""
# revision identifiers, used by Alembic.
revision = 'd10d998b796b'
down_revision = '5f1470603fa0'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
    ### commands auto generated by Alembic - please adjust! ###
op.drop_column('rule_sql', 'rule_description')
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('rule_sql', sa.Column('rule_description', sa.TEXT(), autoincrement=False, server_default='N/A', nullable=False))
### end Alembic commands ###
|
regebro/doctrine.urwid
|
doctrine/urwid/layout.py
|
Python
|
mit
| 10,162
| 0.001181
|
# -*- coding: UTF-8 -*-
import urwid
from urwid.util import (move_prev_char, move_next_char, calc_width,
calc_text_pos, is_wide_char)
from urwid.text_layout import CanNotDisplayText, TextLayout
from urwid.compat import bytes, PYTHON3, B
ONECHAR_NEWLINES = (u'\n', b'\n', u'\r', b'\r')
TWOCHAR_NEWLINES = (u'\n\r', b'\n\r', u'\r\n', b'\r\n')
def find_newline(text, pos):
l = len(text)
while pos < l:
char = text[pos:pos+1]
if char in ONECHAR_NEWLINES:
return pos
pos += 1
return pos
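# Behaviour note for find_newline (an added explanatory comment): it returns
# the index of the first one-character newline at or after `pos`, or
# len(text) when there is none, e.g.
#   find_newline(u'ab\ncd', 0) == 2
#   find_newline(u'abcd', 1) == 4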
class CodeLayout(TextLayout):
"""A layout for Urwid that can deal with tabs."""
tab_width = 8
def supports_align_mode(self, align):
"""Return True if align is a supported align mode."""
return align == urwid.LEFT
def supports_wrap_mode(self, wrap):
"""Return True if wrap is a su
|
pported wrap mode."""
return wrap == urwid.SPACE
def layout(self, text, width, align, wrap):
"""Return a layout structure for text."""
try:
segs = self.calculate_text_segments(text, width, wrap)
            return self.align_layout(text, width, segs, wrap, align)
except CanNotDisplayText:
return [[]]
def calculate_text_segments(self, text, width, wrap):
"""
Calculate the segments of text to display given width screen
columns to display them.
text - unicode text or byte string to display
width - number of available screen columns
wrap - wrapping mode used
Returns a layout structure without alignment applied.
"""
# TODO: This function is a horror and a mess, and really hard to
        # understand. It's based on urwid's StandardLayout, which by itself
        # is overly complex, and I added tab handling, which made it worse.
        # It's a prime candidate for refactoring, making it easier to understand,
        # and as it is heavily used, profiling would be nice too.
nl, nl_o, sp_o, tab_o = "\n", "\n", " ", "\t"
if PYTHON3 and isinstance(text, bytes):
nl = B(nl) # can only find bytes in python3 bytestrings
nl_o = ord(nl_o) # + an item of a bytestring is the ordinal value
sp_o = ord(sp_o)
tab_o = ord(tab_o)
b = []
p = 0
if wrap == 'clip':
# no wrapping to calculate, so it's easy.
l = []
while p <= len(text):
n_cr = find_newline(text, p)
if p != n_cr:
line = text[p:n_cr]
pt = 0
while pt < len(line):
n_tab = line.find(tab_o, pt)
if n_tab == -1:
end = len(line)
else:
end = n_tab
sc = calc_width(line, pt, end)
if sc != 0:
l.append((sc, p + pt, p + end))
if end == n_tab: # A tab was found
extra_space = (self.tab_width - (
sc % self.tab_width))
l.append((extra_space, p + n_tab))
pt = end + 1
l.append((0, n_cr))
b.append(l)
l = []
if text[n_cr:n_cr+2] in TWOCHAR_NEWLINES:
# Two char newline:
p = n_cr + 2
else:
p = n_cr + 1
return b
while p <= len(text):
# look for next eligible line break
n_cr = find_newline(text, p)
line = text[p:n_cr]
l = []
pt = 0
lc = 0
while pt < len(line):
n_tab = line.find(tab_o, pt)
if n_tab == -1:
end = len(line)
else:
end = n_tab
sc = calc_width(line, pt, end)
if lc + sc <= width:
# this segment fits
if sc:
l.append((sc, p + pt, p + end))
if end == n_tab: # A tab was found
extra_space = self.tab_width - (sc % self.tab_width)
l.append((extra_space, p + n_tab))
lc += extra_space
else:
# removed character hint
l.append((0, p + end))
pt = end + 1
lc += sc
if lc >= width:
# The tab can sometimes push line length to width, and
# then we adjust the line length and make a new line.
overshoot = lc - width
spaces, pos = l[-1]
l[-1] = (spaces - overshoot, pos)
b.append(l)
l = []
lc = 0
continue
# This segment does not fit. Let's fit it.
pos, sc = calc_text_pos(line, pt, end, width - lc)
if pos == pt: # pathological width=1 double-byte case
raise CanNotDisplayText(
"Wide character will not fit in 1-column width")
if wrap == 'any':
l.append((sc, p + pt, p + pos))
l.append((0, p + pos))
b.append(l)
l = []
lc = 0
pt = pos
continue
assert wrap == 'space'
if line[pos] == sp_o:
# perfect space wrap
l.append((sc, p + pt, p + pos))
# removed character hint
l.append((0, p + pos))
b.append(l)
l = []
lc = 0
pt = pos + 1
continue
if is_wide_char(line, pos):
# perfect next wide
l.append((sc, p + pt, p + pos))
b.append(l)
l = []
lc = 0
pt = pos
continue
prev = pos
while prev > pt:
prev = move_prev_char(line, pt, prev)
if line[prev] == sp_o:
sc = calc_width(line, pt, prev)
if prev != pt:
l.append((sc, p + pt, p + prev))
l.append((0, p + prev))
b.append(l)
l = []
lc = 0
pt = prev + 1
break
if is_wide_char(line, prev):
# wrap after wide char
nextc = move_next_char(line, prev, pos)
sc = calc_width(line, pt, nextc)
l.append((sc, p + pt, p + nextc))
b.append(l)
l = []
lc = 0
pt = nextc
break
else:
if lc == 0:
# unwrap previous line space if possible to
# fit more text (we're breaking a word anyway)
if b and (len(b[-1]) == 2 or (len(b[-1]) == 1 and
len(b[-1][0]) == 2)):
# look for removed space above
if len(b[-1]) == 1:
[(h_sc, h_off)] = b[-1]
p_sc = 0
p_off = p_end = h_off
else:
[(p_sc, p_off, p_end),
(h_sc, h_off)] = b[-1][-2:]
if (p_sc < width and h_sc == 0 and
|
pcecconi/mapground
|
layers/migrations/0007_auto_20180923_2151.py
|
Python
|
mit
| 1,658
| 0.001206
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-23 21:51
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('layers', '0006_auto_20180811_1412'),
]
operations = [
migrations.AddField(
model_name='capa',
name='cantidad_de_bandas',
            field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='capa',
name='gdal_driver_longname',
field=models.CharField(blank=True, default='', max_length=100, verbose_name='Driver - Long Name'),
),
migrations.AddField(
model_name='capa',
name='gdal_driver_shortname',
            field=models.CharField(blank=True, default='', max_length=100, verbose_name='Driver - Short Name'),
),
migrations.AddField(
model_name='capa',
name='gdal_metadata',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
migrations.AddField(
model_name='capa',
name='proyeccion_proj4',
field=models.CharField(blank=True, default='', max_length=255),
),
migrations.AddField(
model_name='capa',
name='size_height',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='capa',
name='size_width',
field=models.IntegerField(blank=True, null=True),
),
]
|
enthought/etsproxy
|
enthought/chaco/overlays/simple_inspector_overlay.py
|
Python
|
bsd-3-clause
| 108
| 0
|
# proxy module
from __future__ import absolute_import
from chaco.overlays.simple_inspector_overlay import *
|
hfinucane/ansible
|
lib/ansible/plugins/action/__init__.py
|
Python
|
gpl-3.0
| 32,910
| 0.003981
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import pipes
import random
import re
import stat
import tempfile
import time
from abc import ABCMeta, abstractmethod
from ansible.compat.six import binary_type, text_type, iteritems, with_metaclass
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.module_common import modify_module
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes, to_unicode
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionBase(with_metaclass(ABCMeta, object)):
'''
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
'''
def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
self._shared_loader_obj = shared_loader_obj
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
self._supports_check_mode = True
@abstractmethod
def run(self, tmp=None, task_vars=None):
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Temporary directory. Sometimes an action plugin sets up
a temporary directory and then calls another module. This parameter
allows us to reuse the same directory for both.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementors of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
# store the module invocation details into the results
results = {}
if self._task.async == 0:
results['invocation'] = dict(
module_name = self._task.action,
module_args = self._task.args,
)
return results
def _configure_module(self, module_name, module_args, task_vars=None):
'''
Handles the loading and templating of the module code through the
modify_module() function.
'''
if task_vars is None:
task_vars = dict()
# Search module path(s) for named module.
for mod_type in self._connection.module_implementation_preferences:
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if mod_type == '.ps1':
# win_stat, win_file, and win_copy are not just like their
# python counterparts but they are compatible enough for our
# internal usage
if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
module_name = 'win_%s' % module_name
# Remove extra quotes surrounding path parameters before sending to module.
if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
for key in ('src', 'dest', 'path'):
if key in module_args:
module_args[key] = self._connection._shell._unquote(module_args[key])
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
if module_path:
break
else: # This is a for-else: http://bit.ly/1ElPkyg
# Use Windows version of ping module to check module paths when
# using a connection that supports .ps1 suffixes. We check specifically
# for win_ping here, otherwise the code would look for ping.ps1
if '.ps1' in self._connection.module_implementation_preferences:
ping_module = 'win_ping'
else:
ping_module = 'ping'
module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, self._connection.module_implementation_preferences)
if module_path2 is not None:
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
else:
raise AnsibleError("The module %s was not found in configured module paths. "
"Additionally, core modules are missing. If this is a checkout, "
"run 'git submodule update --init --recursive' to correct this problem." % (module_name))
# insert shared code and arguments into the module
(module_data, module_style, module_shebang) = modify_module(module_path, module_args, task_vars=task_vars)
return (module_style, module_shebang, module_data)
def _compute_environment_string(self):
'''
Builds the environment string to be used when executing the remote task.
'''
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [ environments ]
# the environments as inherited need to be reversed, to make
# sure we merge in the parent's values first so those in the
# block then task 'win' in precedence
environments.reverse()
for environment in environments:
if environment is None:
continue
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(temp_environment)
        final_environment = self._templar.template(final_environment)
        return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
'''
Determines if a temp path should be created before the action is executed.
'''
return getattr(self, 'TRANSFERS_FILES', False)
def _late_needs_tmp_path(self, tmp, module_style):
'''
Determines if a temp path is required after some early actions have already taken place.
'''
if tmp and "tmp" in tmp:
# tmp has already been created
return False
if not self._connection.has_pipelining or not self._play_context.pipelining or C
|
DebVortex/ariane-old-
|
ariane/apps/frontend/apps.py
|
Python
|
bsd-3-clause
| 235
| 0
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class FrontendConfig(AppConfig):
"""Configuration for frontend app."""
name = 'ariane.apps.frontend'
    verbose_name = _("Frontend")
|
Xarrow/pySimulatedDNS
|
dnsCat/__init__.py
|
Python
|
apache-2.0
| 139
| 0
|
# -*- coding:utf-8 -*-
"""
Verion: 1.0
Author: zhangjian
Site: http://iliangqunru.com
File: __init__.py.py
Time: 2017/7/22 2:19
"""
|
bandienkhamgalan/flappyeagle
|
views/DraggableComponentButton.py
|
Python
|
gpl-3.0
| 549
| 0.018215
|
#!/usr/bin/env python3
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPixmap, QMouseEvent
from PyQt5.QtWidgets import QToolButton
from PyQt5.QtCore import Qt, pyqtSignal
from models.components import *
class DraggableComponentButton(QToolButton):
mousePress = pyqtSignal(ComponentType, QMouseEvent, name='mousePress')
def __init__(self, parent=None):
QToolButton.__init__(self, parent)
self.componentType = None
def mousePressEvent(self, event):
self.checked = False
self.mousePress.emit(self.componentType, event)
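# A minimal wiring sketch (an illustrative addition, not from the original
# project): the button forwards its componentType together with the raw
# QMouseEvent, so a parent widget can react to the press (component_type and
# handler are hypothetical caller-supplied values).
def _example_wire_button(parent, component_type, handler):
    button = DraggableComponentButton(parent)
    button.componentType = component_type
    button.mousePress.connect(handler)  # handler(componentType, event)
    return button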
|
google-research/google-research
|
coltran/models/layers.py
|
Python
|
apache-2.0
| 24,446
| 0.006054
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various base layers for the colorization transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import operator
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.compat.v2.keras import layers
from coltran.utils import att_utils
from coltran.utils import base_utils
# pylint: disable=duplicate-string-formatting-argument
def residual_dropout(inputs, output, dropout, training):
"""out = inputs + dropout(output)."""
if training and dropout:
output = tf.nn.dropout(output, dropout)
output += inputs
return output
class Shift(layers.Layer):
"""Shifts an input tensor either down or right to preserve causal ordering."""
def __init__(self, dimension, resolution, **kwargs):
"""Init.
Args:
dimension: int, 0 to shift down, 1 to shift right.
resolution: list of 2 ints, [H, W].
**kwargs:
"""
super(Shift, self).__init__(**kwargs)
self.dimension = dimension
self.resolution = resolution
def call(self, x):
shape = x.shape
rank = len(shape)
dim = self.dimension + 1
# Assume 1 batch_dim.
index = [0] * len(self.resolution)
y = x
paddings = np.zeros((rank, 2), dtype=np.int32)
paddings[dim, 0] = 1
y = tf.pad(y, paddings)
rem_dims = rank - 1 - len(index[:dim])
slice_inds = [0] + index[:dim] + [0] * rem_dims
return tf.slice(y, slice_inds, shape)
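# A minimal usage sketch (an illustrative addition, not part of the original
# coltran module): shifting a (batch, H, W, D) activation down by one row
# zero-fills the first row and drops the last, so output row i only depends
# on input rows above it.
def _example_shift_usage():
  x = tf.random.normal((2, 4, 4, 8))
  shift_down = Shift(dimension=0, resolution=[4, 4])
  return shift_down(x)  # same shape as x, content moved down by one row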
class Cache(layers.Layer):
"""Keras layer for cacheing.
Values are cached in a tensor of shape (B, canvas_shape, D).
B and D are inferred from the inputs to the call method.
Every call to the cache instance is assumed to be a tuple of (index, values).
It updates the cache such that cache[:, index:, :] = values
"""
def __init__(self, canvas_shape,
num_batch_axes=1,
dtype=tf.float32,
**kwargs):
super(Cache, self).__init__(trainable=False, **kwargs)
self.canvas_shape = canvas_shape
self.num_batch_axes = num_batch_axes
self._dtype = dtype
def build(self, input_shapes):
num_canvas_dim = len(self.canvas_shape)
value, _ = input_shapes
features_shape = value[self.num_batch_axes + num_canvas_dim:]
cache_shape = (value[:self.num_batch_axes] + self.canvas_shape +
features_shape)
self.cache = tf.zeros(shape=cache_shape, dtype=self._dtype)
super(Cache, self).build(input_shapes)
def reset(self):
self.cache = tf.zeros(shape=self.cache.shape, dtype=self._dtype)
def call(self, inputs):
value, index = inputs
if self.cache.shape == inputs[0].shape:
self.cache = value
return value
shape = self.cache.shape.as_list()
num_index_axes = index.shape[0]
num_batch_axes = self.num_batch_axes
num_feature_axes = len(shape) - num_index_axes - num_batch_axes
features_shape = shape[num_batch_axes + num_index_axes:]
batch_shape = shape[:num_batch_axes]
value_index_shape = tf.shape(value)[num_batch_axes:-num_feature_axes]
if tf.reduce_max(value_index_shape) > 1:
# This is a block update starting at index.
value_ranges = []
for i, s in enumerate(tf.unstack(value_index_shape)):
curr_range = tf.range(index[i], index[i] + s)
value_ranges.append(curr_range)
batch_ranges = [tf.range(s) for s in batch_shape]
mesh = tf.meshgrid(*(batch_ranges + value_ranges), indexing='ij')
indices = tf.stack(mesh, axis=-1)
indices = tf.reshape(indices, [-1, num_index_axes + num_batch_axes])
else:
# This is a single update at index position.
batch_ranges = [tf.range(s) for s in batch_shape]
mesh = tf.meshgrid(*batch_ranges, indexing='ij')
batch_indices = tf.stack(mesh, axis=-1)
batch_indices = tf.reshape(batch_indices, [-1, num_batch_axes])
# Add leading axes to nd-index and tile to get batched indices.
shape_indices = tf.reshape(index, [1] * num_batch_axes + [-1])
shape_indices = tf.tile(shape_indices, batch_shape + [1])
shape_indices = tf.reshape(shape_indices, [-1, num_index_axes])
indices = tf.concat([batch_indices, shape_indices], axis=-1)
# We need to squeeze nd-axes from value before updating.
value = tf.reshape(value, [-1] + features_shape)
self.cache = tf.tensor_scatter_nd_update(self.cache, indices, value)
return self.cache
class Masking(object):
"""Masking options for self-attention.
We can either mask the entire future, i.e. allow looking into the past and
the current element, or we can mask in addition the present as well, i.e.,
we can look only to the past.
"""
FUTURE = 'future'
FUTURE_PRESENT = 'future_present'
class PositionEmbed(layers.Layer):
"""Adds factorized positional embeddings for specified axes."""
def __init__(self, axes, max_lengths=None, **kwargs):
"""Init.
Args:
axes: list of ints, axis over which to apply the positional embeddings.
max_lengths: list of ints, maximum length over each axis.
**kwargs:
"""
super(PositionEmbed, self).__init__(**kwargs)
if not isinstance(axes, (list, tuple)):
axes = [axes]
self.axes = axes
self.max_lengths = None
if max_lengths:
if not isinstance(max_lengths, (list, tuple)):
max_lengths = [max_lengths]
self.max_lengths = max_lengths
def build(self, input_shape):
rank = len(input_shape)
self.axes = sorted([rank + a if a < 0 else a for a in self.axes])
self.max_lengths = self.max_lengths or [input_shape[a] for a in self.axes]
self.embeddings = []
for i, axis in enumerate(self.axes):
shape = [self.max_lengths[i]] + [1] * (rank - axis - 2)
shape.append(input_shape[-1])
init = tf.keras.initializers.RandomNormal(stddev=shape[-1]**-0.5)
self.embeddings.append(
self.add_weight(
name='position_embedding_%d' % i,
shape=shape,
initializer=init,
trainable=True))
super(PositionEmbed, self).build(input_shape)
def call(self, inputs):
out = inputs
for e in self.embeddings:
out += e
return out
class DenseND(layers.Layer):
"""Maps a rank-m tensor to a rank-n tensor through a dense contraction."""
def __init__(self,
filters,
contract_axes=1,
use_bias=False,
activation=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(DenseND, self).__init__(**kwargs)
if isinstance(filters, int):
filters = [filters]
self.filters = tuple(filters)
self.contract_axes = contract_axes
self.use_bias = use_bias
self.activation = tf.keras.activations.get(activation)
self.bias_initializer = bias_initializer
self._kernel_initializer = kernel_initializer
# Behaviours differ when shape(weights) > 2.
# see: https://github.com/tensorflow/tensorflow/blob/r2.1/tensorflow/python/ops/init_ops_v2.py#L733 pylint: disable=line-too-long
if self._kernel_initializer == 'glorot_uniform_nd':
self._kernel_initializer = self._glorot_uniform
def _num_batch_axes(self, input_shape):
"""Returns number of batch axes in inputs."""
return len(input_shape) - len(self.contract_shape)
def _glorot_uniform(self, shape, dtype=tf.float32):
"""Glorot uniform initializer."""
fan_out = functools.reduce(operator.mul, self.filters)
fan_in = functools.reduce(operator.mul, sha
|