| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
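Each record stores one source file split across the `prefix`, `middle`, and `suffix` cells (a fill-in-the-middle layout: the short `middle` span is the completion target, while the long `prefix` and `suffix` cells may be truncated near the 8k display limit). Concatenating the three cells yields the original file, and each record below shows its code in that rejoined form after the metadata row. A minimal sketch of the rejoin, assuming a record is available as a plain Python dict; the `row` literal is an abridged, hypothetical example, not any dataset API:

```python
def reconstruct_file(row):
    """Rejoin one fill-in-the-middle record into the original file text."""
    # The three cells partition the file: prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

# Abridged example shaped like the first record below:
row = {
    "repo_name": "Melraidin/docker-py",
    "path": "docker/constants.py",
    "prefix": "DEFAULT_DOCKER_API_VERSIO",
    "middle": "N = '1.19'\nDEFAULT_TIMEOUT_SECONDS = 60\n",
    "suffix": "STREAM_HEADER_SIZE_BYTES = 8\n",
}
print(reconstruct_file(row))
```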

| Melraidin/docker-py | docker/constants.py | Python | apache-2.0 | 173 | 0 |

DEFAULT_DOCKER_API_VERSION = '1.19'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
    'memory', 'memswap', 'cpushares', 'cpusetcpus'
]

| tylercal/dragonfly | dragonfly/test/test_contexts.py | Python | lgpl-3.0 | 4,426 | 0.000226 |

#
# This file is part of Dragonfly.
# (c) Copyright 2018 by Dane Finlay
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly.  If not, see
# <http://www.gnu.org/licenses/>.
#
import unittest
from dragonfly import CompoundRule, MimicFailure
from dragonfly.test import (RuleTestCase, TestContext, RuleTestGrammar)
# ==========================================================================
class TestRules(RuleTestCase):
def test_multiple_rules(self):
""" Verify that the engine successfully mimics each rule in a
grammar with multiple rules. """
self.add_rule(CompoundRule(name="r1", spec="hello"))
self.add_rule(CompoundRule(name="r2", spec="see you"))
assert self.recognize_node("hello").words() == ["hello"]
assert self.recognize_node("see you").words() == ["see", "you"]
def test_rule_context(self):
""" Verify that the engine works correctly with rule contexts. """
context = TestContext(True)
self.add_rule(CompoundRule(name="r1", spec="test context",
context=context))
# Test that the rule matches when in-context.
results = self.recognize_node("test context").words()
assert results == ["test", "context"]
# Go out of context and test again.
# Use the engine's mimic method because recognize_node won't return
# RecognitionFailure like ElementTester.recognize does.
context.active = False
self.assertRaises(MimicFailure, self.engine.mimic, "test context")
# Test again after going back into context.
context.active = True
results = self.recognize_node("test context").words()
assert results == ["test", "context"]
def test_grammar_context(self):
""" Verify that the engine works correctly with grammar
contexts."""
# Recreate the RuleTestGrammar using a context and add a rule.
context = TestContext(True)
self.grammar = RuleTestGrammar(context=context)
self.add_rule(CompoundRule(name="r1", spec="test context"))
# Test that the rule matches when in-context.
results = self.recognize_node("test context").words()
assert results == ["test", "context"]
# Go out of context and test again.
context.active = False
self.assertRaises(MimicFailure, self.engine.mimic, "test context")
# Test again after going back into context.
context.active = True
results = self.recognize_node("test context").words()
assert results == ["test", "context"]
def test_exclusive_grammars(self):
""" Verify that the engine supports exclusive grammars. """
# This is here as grammar exclusivity is context related.
# Set up two grammars to test with.
grammar1 = self.grammar
grammar1.add_rule(CompoundRule(spec="grammar one"))
grammar2 = RuleTestGrammar(name="Grammar2")
grammar2.add_rule(CompoundRule(spec="grammar two"))
grammar1.load()
grammar2.load()
# Set grammar1 as exclusive and make some assertions.
grammar1.set_exclusiveness(True)
results = grammar1.recognize_node("grammar one").words()
assert results == ["grammar", "one"]
self.assertRaises(MimicFailure, self.engine.mimic, "grammar two")
# Set grammar1 as no longer exclusive and make some assertions.
grammar1.set_exclusiveness(False)
results = grammar1.recognize_node("grammar one").words()
assert results == ["grammar", "one"]
results = grammar2.recognize_node("grammar two").words()
assert results == ["grammar", "two"]
# ==========================================================================
if __name__ == "__main__":
unittest.main()

| tulikavijay/vms | vms/shift/views.py | Python | gpl-2.0 | 26,821 | 0.003169 |

# standard library
from datetime import date
# third party
from braces.views import LoginRequiredMixin, AnonymousRequiredMixin
# Django
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import TemplateView, DeleteView, ListView
from django.views.generic.edit import FormView, UpdateView
from django.utils.decorators import method_decorator
# local Django
from job.services import *
from shift.forms import HoursForm, ShiftForm
from shift.models import Shift
from shift.services import *
from volunteer.forms import SearchVolunteerForm
from volunteer.services import get_all_volunteers, search_volunteers
from volunteer.utils import vol_id_check
from vms.utils import check_correct_volunteer, check_correct_volunteer_shift
class AdministratorLoginRequiredMixin(object):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
user = request.user
admin = None
try:
admin = user.administrator
except ObjectDoesNotExist:
pass
if not admin:
return render(request, 'vms/no_admin_rights.html')
else:
return super(AdministratorLoginRequiredMixin, self).dispatch(request, *args, **kwargs)
class AddHoursView(LoginRequiredMixin, FormView):
template_name = 'shift/add_hours.html'
form_class = HoursForm
@method_decorator(check_correct_volunteer_shift)
def dispatch(self, *args, **kwargs):
return super(AddHoursView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(AddHoursView, self).get_context_data(**kwargs)
shift_id = self.kwargs['shift_id']
volunteer_id = self.kwargs['volunteer_id']
context['volunteer_id'] = volunteer_id
context['shift_id'] = shift_id
context['shift'] = get_shift_by_id(shift_id)
return context
def form_valid(self, form):
shift_id = self.kwargs['shift_id']
volunteer_id = self.kwargs['volunteer_id']
shift = get_shift_by_id(shift_id)
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
shift_start_time = shift.start_time
shift_end_time = shift.end_time
try:
if (end_time > start_time):
if (start_time >= shift_start_time and end_time <= shift_end_time):
add_shift_hours(
volunteer_id,
shift_id,
start_time,
end_time
)
return HttpResponseRedirect(reverse(
'shift:view_hours',
args=(volunteer_id,)
))
else:
messages.add_message(self.request, messages.INFO, 'Logged hours should be between shift hours')
return render(
self.request,
'shift/add_hours.html',
{'form': form, 'shift_id': shift_id, 'volunteer_id': volunteer_id, 'shift':shift,}
)
else:
messages.add_message(self.request, messages.INFO, 'End time should be greater than start time')
return render(
self.request,
'shift/add_hours.html',
{'form': form, 'shift_id': shift_id, 'volunteer_id': volunteer_id, 'shift':shift,}
)
except:
raise Http404
class AddHoursManagerView(AdministratorLoginRequiredMixin, FormView):
template_name = 'shift/add_hours_manager.html'
form_class = HoursForm
def get_context_data(self, **kwargs):
context = super(AddHoursManagerView, self).get_context_data(**kwargs)
shift_id = self.kwargs['shift_id']
volunteer_id = self.kwargs['volunteer_id']
context['volunteer_id'] = volunteer_id
context['shift_id'] = shift_id
context['shift'] = get_shift_by_id(shift_id)
return context
def form_valid(self, form):
shift_id = self.kwargs['shift_id']
volunteer_id = self.kwargs['volunteer_id']
shift = get_shift_by_id(shift_id)
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
shift_start_time = shift.start_time
shift_end_time = shift.end_time
try:
if (end_time > start_time):
if (start_time >= shift_start_time and end_time <= shift_end_time):
add_shift_hours(
volunteer_id,
shift_id,
start_time,
end_time
)
return HttpResponseRedirect(reverse(
'shift:manage_volunteer_shifts',
args=(volunteer_id,)
))
else:
messages.add_message(self.request, messages.INFO, 'Logged hours should be between shift hours')
return render(
self.request,
'shift/add_hours_manager.html',
{'form': form, 'shift_id': shift_id, 'volunteer_id': volunteer_id, 'shift':shift,}
)
else:
messages.add_message(self.request, messages.INFO, 'End time should be greater than start time')
return render(
self.request,
'shift/add_hours_manager.html',
{'form': form, 'shift_id': shift_id, 'volunteer_id': volunteer_id, 'shift':shift,}
)
except:
raise Http404
@login_required
def cancel(request, shift_id, volunteer_id):
if shift_id and volunteer_id:
user = request.user
admin = None
volunteer = None
try:
admin = user.administrator
except ObjectDoesNotExist:
pass
try:
volunteer = user.volunteer
except ObjectDoesNotExist:
pass
# check that either an admin or volunteer is logged in
if not admin and not volunteer:
return render(
request,
'vms/no_volunteer_rights.html',
status=403
)
# if a volunteer is logged in, verify that they are canceling their own shift
if volunteer:
if (int(volunteer.id) != int(volunteer_id)):
return render(
request,
'vms/no_volunteer_rights.html',
status=403
)
if request.method == 'POST':
try:
cancel_shift_registration(volunteer_id, shift_id)
if admin:
return HttpResponseRedirect(reverse(
'shift:manage_volunteer_shifts',
args=(volunteer_id, )
))
elif volunteer:
return HttpResponseRedirect(reverse(
'shift:view_volunteer_shifts',
args=(volunteer_id, )
))
else:
raise Http404
except:
raise Http404
else:
return render(
request,
'shift/cancel_shift.html',
{'shift_id': shift_id, 'volunteer_id': volunteer_id}
)
else:
raise Http404
class ClearHoursView(LoginRequiredMixin, TemplateView):
template_name = 'shift/clear_hours.html'
suc

| xifle/home-assistant | homeassistant/components/binary_sensor/enocean.py | Python | mit | 2,747 | 0 |

"""
Support for EnOcean binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.enocean/
"""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
BinarySensorDevice, PLATFORM_SCHEMA, SENSOR_CLASSES_SCHEMA)
from homeassistant.components import enocean
from homeassistant.const import (CONF_NAME, CONF_ID, CONF_SENSOR_CLASS)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['enocean']
DEFAULT_NAME = 'EnOcean binary sensor'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ID): vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SENSOR_CLASS, default=None): SENSOR_CLASSES_SCHEMA,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Binary Sensor platform fo EnOcean."""
dev_id = config.get(CONF_ID)
devname = config.get(CONF_NAME)
sensor_class = config.get(CONF_SENSOR_CLASS)
add_devices([EnOceanBinarySensor(dev_id, devname, sensor_class)])
class EnOceanBinarySensor(enocean.EnOceanDevice, BinarySensorDevice):
"""Representation of EnOcean binary sensors such as wall switches."""
def __init__(self, dev_id, devname, sensor_class):
"""Initialize the EnOcean binary sensor."""
enocean.EnOceanDevice.__init__(self)
self.stype = "listener"
self.dev_id = dev_id
self.which = -1
self.onoff = -1
self.devname = devname
self._sensor_class = sensor_class
@property
def name(self):
"""The default name for the binary sensor."""
return self.devname
@property
def sensor_class(self):
"""Return the class of this sensor."""
return self._sensor_class
def value_changed(self, value, value2):
"""Fire an event with the data that have changed.
This method is called when there is an incoming packet associated
with this platform.
"""
self.update_ha_state()
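        # Decode the rocker-switch data byte: 0x70/0x50 report switch half 0
        # (off/on), 0x30/0x10 report switch half 1 (off/on).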
if value2 == 0x70:
self.which = 0
self.onoff = 0
elif value2 == 0x50:
self.which = 0
self.onoff = 1
elif value2 == 0x30:
self.which = 1
self.onoff = 0
elif value2 == 0x10:
self.which = 1
self.onoff = 1
self.hass.bus.fire('button_pressed', {"id": self.dev_id,
'pushed': value,
'which': self.which,
'onoff': self.onoff})

| gitesei/faunus | examples/pythontest.py | Python | mit | 4,462 | 0.032048 |

#!/usr/bin/env python
import json
import unittest
import numpy as np
from math import pi
import sys
sys.path.insert(0,'../')
from pyfaunus import *
# Dictionary defining input
d = {}
d['geometry'] = { 'type': 'cuboid', 'length': 50 }
d['atomlist'] = [
{ 'Na': dict( r=2.0, eps=0.05, q=1.0, tfe=1.0 ) },
{ 'Cl': dict( r=1.2, eps=0.05, q=-1.0, tfe=1.0 ) }
]
d['moleculelist'] = [
{ 'salt': dict(atoms=['Na','Cl'], atomic=True) }
]
d['insertmolecules'] = [
{ 'salt': dict( N=1 ) }
]
d['energy'] = [
{ 'isobaric' : {'P/atm': 0.1} }
]
d['analysis'] = [
{ 'sanity' : dict( Nstep=10 ) }
]
# Create a simulation Space
# this will:
# - initialize `atoms` from dictionary
# - add particles / molecules
setTemperature(300) # must be set before atom/molecule properties are read
spc = Space()
spc.from_dict(d)
pot = Hamiltonian(spc, d['energy'])
analysis = Analysis(spc, pot, d['analysis'])
# Test temperature
class TestGlobals(unittest.TestCase):
def test_temperature(self):
        self.assertAlmostEqual(getTemperature(), 300)
# Loop over atom types
for i in atoms:
print("atom name and diameter:", i.name, i.sigma)
# Test Coulomb
class TestCoulomb(unittest.TestCase):
# this doesn't test anything yet
def test_pairpot(self):
d = { 'default' : [
{ 'coulomb': { 'epsr': 80, 'type': 'qpotential', 'cutoff': 100, 'order': 5 } }
]
}
pot = FunctorPotential( d )
r = np.linspace(1,10,5)
u = np.array( [pot.energy( spc.p[0], spc.p[1], [0,0,i] ) for i in r] )
pot.selfEnergy(spc.p[0])
# Test SASA calculations
class TestSASA(unittest.TestCase):
def test_pairpot(self):
d = { 'default' : [
{ 'sasa': { 'molarity': 1.5, 'radius': 1.4, 'shift': False } }
] }
pot = FunctorPotential( d )
r = np.linspace(0,10,5)
u = np.array( [pot.energy( spc.p[0], spc.p[1], [0,0,i] ) for i in r] )
np.testing.assert_almost_equal(np.divide(u, [87.3576,100.4613,127.3487,138.4422,138.4422]), 1., 4)
def test_freesasa_hamiltonian(self):
H = Hamiltonian(spc, [ {'sasa' : {'molarity': 1.5, 'radius': 1.4}} ] )
spc.p[0].pos = [0,0,0] # fix 1st particle in origin
c = Change() # change object telling that a full energy calculation
c.all = True; # should be performed when calling `energy()`
u = []
r = np.linspace(0,10,5)
for i in r: # loop over particle-particle distances
spc.p[1].pos = [0,0,i]
u.append( H.energy(c) )
np.testing.assert_almost_equal(np.divide(u, np.multiply(1.26, [87.3576,100.4613,127.3487,138.4422,138.4422])), 1/1.26, 2) # 2.5% error
# Geometry
class TestGeometry(unittest.TestCase):
def test_cuboid(self):
geo = Chameleon( dict(type="cuboid", length=[2,3,4]) )
V = geo.getVolume();
self.assertAlmostEqual(V, 2*3*4, msg="volume")
a = geo.boundary( [1.1, 1.5, -2.001] );
self.assertAlmostEqual(a[0], -0.9)
self.assertAlmostEqual(a[1], 1.5)
self.assertAlmostEqual(a[2], 1.999)
geo.setVolume(123.4);
self.assertAlmostEqual( geo.getVolume(), 123.4);
rnd = Random()
for i in range(1000):
pos = geo.randompos(rnd);
self.assertEqual( geo.collision( pos ), False )
def test_sphere(self):
r = 15.0
geo = Chameleon( dict(type="sphere", radius=r) )
V = geo.getVolume();
self.assertAlmostEqual(V, 4*pi/3*r**3)
self.assertEqual( geo.collision([0,r+0.001,0]), True)
self.assertEqual( geo.collision([0,0,r-0.001]), False)
geo.setVolume(123.4);
self.assertAlmostEqual( geo.getVolume(), 123.4);
rnd = Random()
for i in range(1000):
pos = geo.randompos(rnd);
self.assertEqual( geo.collision( pos ), False )
class TestSpeciation(unittest.TestCase):
def test_IdealTerm(self):
spc = Space()
spc.from_dict(d)
c = Change()
g = spc.groups[0]
self.assertEqual( IdealTerm(spc, spc, c), 0 )
self.assertEqual( g.capacity(), 2 )
self.assertEqual( len(g), 2 )
g.deactivate( g.end()-1, g.end() ) # deactivate last atom
self.assertEqual( len(g), 1 )
# add more...
if __name__ == '__main__':
unittest.main()

| BITalinoWorld/python-serverbit | twisted-ws/txws.py | Python | gpl-3.0 | 19,519 | 0.000768 |

# Copyright (c) 2011 Oregon State University Open Source Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Blind reimplementation of WebSockets as a standalone wrapper for Twisted
protocols.
"""
__version__ = "0.7.1"
from base64 import b64encode, b64decode
from hashlib import md5, sha1
from string import digits
from struct import pack, unpack
from twisted.internet.interfaces import ISSLTransport
from twisted.protocols.policies import ProtocolWrapper, WrappingFactory
from twisted.python import log
from twisted.web.http import datetimeToString
class WSException(Exception):
"""
Something stupid happened here.
If this class escapes txWS, then something stupid happened in multiple
places.
"""
# Flavors of WS supported here.
# HYBI00 - Hixie-76, HyBi-00. Challenge/response after headers, very minimal
# framing. Tricky to start up, but very smooth sailing afterwards.
# HYBI07 - HyBi-07. Modern "standard" handshake. Bizarre masked frames, lots
# of binary data packing.
# HYBI10 - HyBi-10. Just like HyBi-07. No, seriously. *Exactly* the same,
# except for the protocol number.
# RFC6455 - RFC 6455. The official WebSocket protocol standard. The protocol
# number is 13, but otherwise it is identical to HyBi-07.
HYBI00, HYBI07, HYBI10, RFC6455 = range(4)
# States of the state machine. Because there are no reliable byte counts for
# any of this, we don't use StatefulProtocol; instead, we use custom state
# enumerations. Yay!
REQUEST, NEGOTIATING, CHALLENGE, FRAMES = range(4)
# Control frame specifiers. Some versions of WS have control signals sent
# in-band. Adorable, right?
NORMAL, CLOSE, PING, PONG = range(4)
opcode_types = {
0x0: NORMAL,
0x1: NORMAL,
0x2: NORMAL,
0x8: CLOSE,
0x9: PING,
0xa: PONG,
}
encoders = {
"base64": b64encode,
}
decoders = {
"base64": b64decode,
}
# Fake HTTP stuff, and a couple convenience methods for examining fake HTTP
# headers.
def http_headers(s):
"""
Create a dictionary of data from raw HTTP headers.
"""
d = {}
for line in s.split("\r\n"):
try:
key, value = [i.strip() for i in line.split(":", 1)]
d[key] = value
except ValueError:
pass
return d
def is_websocket(headers):
"""
Determine whether a given set of headers is asking for WebSockets.
"""
return ("Upgrade" in headers.get("Connection", "")
and headers.get("Upgrade").lower() == "websocket")
def is_hybi00(headers):
"""
Determine whether a given set of headers is HyBi-00-compliant.
Hixie-76 and HyBi-00 use a pair of keys in the headers to handshake with
servers.
"""
return "Sec-WebSocket-Key1" in headers and "Sec-WebSocket-Key2" in headers
# Authentication for WS.
def complete_hybi00(headers, challenge):
"""
Generate the response for a HyBi-00 challenge.
"""
key1 = headers["Sec-WebSocket-Key1"]
key2 = headers["Sec-WebSocket-Key2"]
first = int("".join(i for i in key1 if i in digits)) / key1.count(" ")
second = int("".join(i for i in key2 if i in digits)) / key2.count(" ")
nonce = pack(">II8s", first, second, challenge)
return md5(nonce).digest()
def make_accept(key):
"""
Create an "accept" response for a given key.
This dance is expected to somehow magically make WebSockets secure.
"""
guid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
    return sha1("%s%s" % (key, guid)).digest().encode("base64").strip()
# Frame helpers.
# Separated out to make unit testing a lot easier.
# Frames are bonghits in newer WS versions, so helpers are appreciated.
def make_hybi00_frame(buf):
"""
    Make a HyBi-00 frame from some data.
This function does exactly zero checks to make sure that the data is safe
and valid text without any 0xff bytes.
"""
return "\x00%s\xff" % buf
def parse_hybi00_frames(buf):
"""
Parse HyBi-00 frames, returning unwrapped frames and any unmatched data.
This function does not care about garbage data on the wire between frames,
and will actively ignore it.
"""
start = buf.find("\x00")
tail = 0
frames = []
while start != -1:
end = buf.find("\xff", start + 1)
if end == -1:
# Incomplete frame, try again later.
break
else:
# Found a frame, put it in the list.
frame = buf[start + 1:end]
frames.append((NORMAL, frame))
tail = end + 1
start = buf.find("\x00", end + 1)
# Adjust the buffer and return.
buf = buf[tail:]
return frames, buf
def mask(buf, key):
"""
Mask or unmask a buffer of bytes with a masking key.
The key must be exactly four bytes long.
"""
# This is super-secure, I promise~
key = [ord(i) for i in key]
buf = list(buf)
for i, char in enumerate(buf):
buf[i] = chr(ord(char) ^ key[i % 4])
return "".join(buf)
def make_hybi07_frame(buf, opcode=0x1):
"""
Make a HyBi-07 frame.
This function always creates unmasked frames, and attempts to use the
smallest possible lengths.
"""
if len(buf) > 0xffff:
length = "\x7f%s" % pack(">Q", len(buf))
elif len(buf) > 0x7d:
length = "\x7e%s" % pack(">H", len(buf))
else:
length = chr(len(buf))
# Always make a normal packet.
header = chr(0x80 | opcode)
frame = "%s%s%s" % (header, length, buf)
return frame
def parse_hybi07_frames(buf):
"""
Parse HyBi-07 frames in a highly compliant manner.
"""
start = 0
frames = []
while True:
# If there's not at least two bytes in the buffer, bail.
if len(buf) - start < 2:
break
# Grab the header. This single byte holds some flags nobody cares
# about, and an opcode which nobody cares about.
header = ord(buf[start])
if header & 0x70:
# At least one of the reserved flags is set. Pork chop sandwiches!
raise WSException("Reserved flag in HyBi-07 frame (%d)" % header)
frames.append(("", CLOSE))
return frames, buf
# Get the opcode, and translate it to a local enum which we actually
# care about.
opcode = header & 0xf
try:
opcode = opcode_types[opcode]
except KeyError:
raise WSException("Unknown opcode %d in HyBi-07 frame" % opcode)
# Get the payload length and determine whether we need to look for an
# extra length.
length = ord(buf[start + 1])
masked = length & 0x80
length &= 0x7f
# The offset we're gonna be using to walk through the frame. We use
# this because the offset is variable depending on the length and
# mask.
offset = 2
# Extra length fields.
if length == 0x7e:
if len(buf) - start < 4:
break
length = buf[start + 2:start + 4]
length = unpack(">H", length)[0]
offset += 2
elif length == 0x7f:

| midokura/python-neutron-plugin-midonet | midonet/neutron/plugin.py | Python | apache-2.0 | 35,809 | 0 |

# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Midokura Japan K.K.
# Copyright (C) 2013 Midokura PTE LTD
# Copyright (C) 2014 Midokura SARL.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Takaaki Suzuki, Midokura Japan KK
# @author: Tomoe Sugihara, Midokura Japan KK
# @author: Ryu Ishimoto, Midokura Japan KK
# @author: Rossella Sblendido, Midokura Japan KK
# @author: Duarte Nunes, Midokura Japan KK
from oslo.config import cfg
from midonetclient import client
from midonet.neutron import api
from midonet.neutron.common import config # noqa
from midonet.neutron.common import util
from midonet.neutron.db import task
from midonet.neutron import extensions
from sqlalchemy import exc as sa_exc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_gwmode_db
from neutron.db.loadbalancer import loadbalancer_db
from neutron.db import portbindings_db
from neutron.db import routedserviceinsertion_db as rsi_db
from neutron.db import securitygroups_db
from neutron.extensions import portbindings
from neutron.extensions import routedserviceinsertion as rsi
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
class MidonetPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
portbindings_db.PortBindingMixin,
external_net_db.External_net_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
securitygroups_db.SecurityGroupDbMixin,
rsi_db.RoutedServiceInsertionDbMixin,
loadbalancer_db.LoadBalancerPluginDb,
api.MidoNetApiMixin,
task.MidoClusterMixin):
supported_extension_aliases = ['agent',
'binding',
'bgp',
'cluster',
'chain-rule',
'dhcp_agent_scheduler',
'external-net',
'ip-addr-group',
'license',
'midonet-subnet',
'router',
'host',
'bridge',
'midonet-port',
'midonet-router',
'port-group',
'quotas',
'security-group',
'system',
'routed-service-insertion',
'routing-table',
'vtep',
'lbaas',
'tunnelzone']
__native_bulk_support = True
def __init__(self):
super(MidonetPluginV2, self).__init__()
# Instantiate MidoNet API client
conf = cfg.CONF.MIDONET
neutron_extensions.append_api_extensions_path(extensions.__path__)
self.api_cli = client.MidonetClient(conf.midonet_uri, conf.username,
conf.password,
project_id=conf.project_id)
self.setup_rpc()
self.repair_quotas_table()
self.base_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_MIDONET,
portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL,
portbindings.VIF_DETAILS: {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
def setup_rpc(self):
# RPC support
self.topic = topics.PLUGIN
self.conn = n_rpc.create_connection(new=True)
self.endpoints = [dhcp_rpc.DhcpRpcCallback(),
agents_db.AgentExtRpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_threads()
def repair_quotas_table(self):
query = ("CREATE TABLE `quotas` ( `id` varchar(36) NOT NULL, "
"`tenant_id` varchar(255) DEFAULT NULL, "
"`resource` varchar(255) DEFAULT NULL, "
"`limit` int(11) DEFAULT NULL, "
"PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8;")
session = db.get_session()
try:
session.execute(query)
except sa_exc.OperationalError:
# If the table already exists, then this is expected.
pass
def _process_create_network(self, context, network):
net_data = network['network']
tenant_id = self._get_tenant_id_for_create(context, net_data)
net_data['tenant_id'] = tenant_id
self._ensure_default_security_group(context, tenant_id)
with context.session.begin(subtransactions=True):
net = super(MidonetPluginV2, self).create_network(context, network)
self._process_l3_create(context, net, net_data)
return net
    @util.handle_api_error
def create_network(self, context, network):
"""Create Neutron network.
Create a new Neutron network and its corresponding MidoNet bridge.
"""
LOG.info(_('MidonetPluginV2.create_network called: network=%r'),
network)
        net = self._process_create_network(context, network)
try:
self.api_cli.create_network(net)
except Exception as ex:
LOG.error(_("Failed to create a network %(net_id)s in Midonet:"
"%(err)s"), {"net_id": net["id"], "err": ex})
with excutils.save_and_reraise_exception():
super(MidonetPluginV2, self).delete_network(context, net['id'])
LOG.info(_("MidonetPluginV2.create_network exiting: net=%r"), net)
return net
@util.handle_api_error
def update_network(self, context, id, network):
"""Update Neutron network.
Update an existing Neutron network and its corresponding MidoNet
bridge.
"""
LOG.info(_("MidonetPluginV2.update_network called: id=%(id)r, "
"network=%(network)r"), {'id': id, 'network': network})
with context.session.begin(subtransactions=True):
net = super(MidonetPluginV2, self).update_network(
context, id, network)
self._process_l3_update(context, net, network['network'])
self.api_cli.update_network(id, net)
LOG.info(_("MidonetPluginV2.update_networ

| johanfrisk/Python_at_web | notebooks/code/geoxml.py | Python | mit | 743 | 0.012113 |

import urllib
import xml.etree.ElementTree as ET
serviceurl = 'http://maps.googleapis.com/maps/api/geocode/xml?'
while True:
address = raw_input('Enter location: ')
if len(address) < 1 : break
url = serviceurl + urllib.urlencode({'sensor':'false', 'address': address})
print 'Retrieving', url
uh = urllib.urlopen(url)
data = uh.read()
print 'Retrieved',len(data),'characters'
print data
tree = ET.fromstring(data)
results = tree.findall('result')
lat = results[0].find('geometry').find('location').find('lat').text
lng = results[0].find('geometry').find('location').find('lng').text
    location = results[0].find('formatted_address').text
    print 'lat',lat,'lng',lng
print location

| NicovincX2/Python-3.5 | Algorithmique/Mathématiques discrètes/Combinatoire/Nombre de Catalan/nb_catalan_3methods.py | Python | gpl-3.0 | 1,091 | 0 |

# -*- coding: utf-8 -*-
import os
from math import factorial
import functools
def memoize(func):
cache = {}
def memoized(key):
# Returned, new, memoized version of decorated function
if key not in cache:
cache[key] = func(key)
return cache[key]
return functools.update_wrapper(memoized, func)
@memoize
def fact(n):
return factorial(n)
def cat_direct(n):
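    # Closed form: C(n) = (2n)! // ((n + 1)! * n!), using the memoized factorial.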
return fact(2 * n) // fact(n + 1) // fact(n)
@memoize
def catR1(n):
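    # Convolution recurrence: C(0) = 1, C(n) = sum over i of C(i) * C(n-1-i).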
return (1 if n == 0
else sum(catR1(i) * catR1(n - 1 - i)
for i in range(n)))
@memoize
def catR2(n):
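    # First-order recurrence: C(0) = 1, C(n) = (4n - 2) * C(n-1) // (n + 1).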
return (1 if n == 0
else ((4 * n - 2) * catR2(n - 1)) // (n + 1))
if __name__ == '__main__':
def pr(results):
fmt = '%-10s %-10s %-10s'
        print((fmt % tuple(c.__name__ for c in defs)).upper())
print(fmt % (('=' * 10,) * 3))
for r in zip(*results):
print(fmt % r)
defs = (cat_direct, catR1, catR2)
results = [tuple(c(i) for i in range(15)) for c in defs]
pr(results)
os.system("pause")

| orbitfp7/nova | nova/scheduler/filter_scheduler.py | Python | apache-2.0 | 7,412 | 0.00027 |

# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The FilterScheduler is for creating instances locally.
You can customize this scheduler by specifying your own Host Filters and
Weighing Functions.
"""
import random
from oslo_config import cfg
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova import rpc
from nova.scheduler import driver
from nova.scheduler import scheduler_options
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
filter_scheduler_opts = [
cfg.IntOpt('scheduler_host_subset_size',
default=1,
help='New instances will be scheduled on a host chosen '
'randomly from a subset of the N best hosts. This '
'property defines the subset size that a host is '
'chosen from. A value of 1 chooses the '
'first host returned by the weighing functions. '
'This value must be at least 1. Any value less than 1 '
'will be ignored, and 1 will be used instead')
]
CONF.register_opts(filter_scheduler_opts)
class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
self.options = scheduler_options.SchedulerOptions()
self.notifier = rpc.get_notifier('scheduler')
def select_destinations(self, context, request_spec, filter_properties):
"""Selects a filtered set of hosts and nodes."""
self.notifier.info(context, 'scheduler.select_destinations.start',
dict(request_spec=request_spec))
num_instances = request_spec['num_instances']
selected_hosts = self._schedule(context, request_spec,
filter_properties)
# Couldn't fulfill the request_spec
if len(selected_hosts) < num_instances:
# Log the details but don't put those into the reason since
# we don't want to give away too much information about our
# actual environment.
LOG.debug('There are %(hosts)d hosts available but '
'%(num_instances)d instances requested to build.',
{'hosts': len(selected_hosts),
'num_instances': num_instances})
reason = _('There are not enough hosts available.')
raise exception.NoValidHost(reason=reason)
dests = [dict(host=host.obj.host, nodename=host.obj.nodename,
limits=host.obj.limits) for host in selected_hosts]
self.notifier.info(context, 'scheduler.select_destinations.end',
dict(request_spec=request_spec))
return dests
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
def populate_filter_properties(self, request_spec, filter_properties):
"""Stuff things into filter_properties. Can be overridden in a
subclass to add more data.
"""
# Save useful information from the request spec for filter processing:
project_id = request_spec['instance_properties']['project_id']
os_type = request_spec['instance_properties']['os_type']
filter_properties['project_id'] = project_id
filter_properties['os_type'] = os_type
def _schedule(self, context, request_spec, filter_properties):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
update_group_hosts = filter_properties.get('group_updated', False)
config_options = self._get_configuration_options()
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'instance_type': instance_type})
        self.populate_filter_properties(request_spec, filter_properties)
# Find our local list of acceptable hosts by repeatedly
# filtering and weighing our options. Each time we choose a
# host, we virtually consume resources on it so subsequent
# selections can adjust accordingly.
        # Note: remember, we are using an iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
hosts = self._get_all_host_states(elevated)
selected_hosts = []
num_instances = request_spec.get('num_instances', 1)
for num in xrange(num_instances):
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties, index=num)
if not hosts:
# Can't get any more locally.
break
LOG.debug("Filtered %(hosts)s", {'hosts': hosts})
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
LOG.debug("Weighed %(hosts)s", {'hosts': weighed_hosts})
scheduler_host_subset_size = CONF.scheduler_host_subset_size
if scheduler_host_subset_size > len(weighed_hosts):
scheduler_host_subset_size = len(weighed_hosts)
if scheduler_host_subset_size < 1:
scheduler_host_subset_size = 1
chosen_host = random.choice(
weighed_hosts[0:scheduler_host_subset_size])
selected_hosts.append(chosen_host)
# Now consume the resources so the filter/weights
# will change for the next instance.
chosen_host.obj.consume_from_instance(instance_properties)
if update_group_hosts is True:
# NOTE(sbauza): Group details are serialized into a list now
# that they are populated by the conductor, we need to
# deserialize them
if isinstance(filter_properties['group_hosts'], list):
filter_properties['group_hosts'] = set(
filter_properties['group_hosts'])
filter_properties['group_hosts'].add(chosen_host.obj.host)
return selected_hosts
def _get_all_host_states(self, context):
"""Template method, so a subclass can implement caching."""
return self.host_manager.get_all_host_states(context)

| matteo88/gasistafelice | gasistafelice/rest/views/blocks/basket.py | Python | agpl-3.0 | 8,830 | 0.013024 |

from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.core import urlresolvers
from gasistafelice.rest.views.blocks.base import BlockSSDataTables, ResourceBlockAction, CREATE_PDF, SENDME_PDF
from gasistafelice.consts import EDIT, CONFIRM
from gasistafelice.lib.shortcuts import render_to_response, render_to_context_response
from gasistafelice.lib.http import HttpResponse
from gasistafelice.gas.models import GASMember
from gasistafelice.gas.forms.order.gmo import BasketGASMemberOrderForm
from gasistafelice.lib.formsets import BaseFormSetWithRequest
from django.forms.formsets import formset_factory
import cgi, os
import logging
log = logging.getLogger(__name__)
#------------------------------------------------------------------------------#
#                                                                              #
#------------------------------------------------------------------------------#
class Block(BlockSSDataTables):
BLOCK_NAME = "basket"
BLOCK_DESCRIPTION = _("Basket")
BLOCK_VALID_RESOURCE_TYPES = ["gasmember"]
#3: 'ordered_product__stock__supplier_stock__product', gasstock
COLUMN_INDEX_NAME_MAP = {
0: 'pk',
1: 'ordered_product__order__pk',
2: 'ordered_product__gasstock__stock__supplier__name',
3: 'ordered_product__gasstock__stock__product__name',
4: 'ordered_price',
5: '' ,
6: 'ordered_amount',
7: 'tot_price',
8: 'enabled',
9: ''
}
#,
# 10: '' --> order_urn
def _get_user_actions(self, request):
user_actions = []
if not request.resource.gas.config.gasmember_auto_confirm_order:
#TODO seldon: does this work for a GASMember?
#if request.user.has_perm(EDIT, obj=request.resource):
if request.user == request.resource.person.user:
user_actions += [
ResourceBlockAction(
block_name = self.BLOCK_NAME,
resource = request.resource,
name=CONFIRM, verbose_name=_("Confirm all"),
popup_form=False,
),
]
if request.user == request.resource.person.user:
user_actions += [
ResourceBlockAction(
block_name = self.BLOCK_NAME,
resource = request.resource,
name=CREATE_PDF, verbose_name=_("Create PDF"),
popup_form=False,
method="OPENURL"
),
ResourceBlockAction(
block_name = self.BLOCK_NAME,
resource = request.resource,
name=SENDME_PDF, verbose_name=_("Send email PDF gasmember"),
popup_form=False,
)
]
return user_actions
def _get_resource_list(self, request):
#qs = request.resource.basket | request.resource.basket_to_be_delivered
qs = request.resource.basket
return qs
def _get_edit_multiple_form_class(self):
qs = self._get_resource_list(self.request)
return formset_factory(
form=BasketGASMemberOrderForm,
formset=BaseFormSetWithRequest,
extra=qs.count()
)
def _get_records(self, request, querySet):
"""Return records of rendered table fields."""
gmos = querySet
data = {}
# data2 = {}
i = 0
c = gmos.count()
# Store mapping between GSSOP-id and neededs info: formset_index and ordered_total
map_info = { }
gmo = self.resource #GASMemberOrder()
av = False
for i,el in enumerate(querySet):
key_prefix = 'form-%d' % i
data.update({
'%s-id' % key_prefix : el.pk, #gmo.pk,
'%s-ordered_amount' % key_prefix : el.ordered_amount or 0,
'%s-ordered_price' % key_prefix : el.ordered_product.order_price, #displayed as hiddend field
'%s-gm_id' % key_prefix : gmo.pk, #displayed as hiddend field !Attention is gmo_id
'%s-gsop_id' % key_prefix : el.ordered_product.pk,
'%s-enabled' % key_prefix : bool(av),
})
map_info[el.pk] = {
'formset_index' : i,
'ordered_total' : el.price_expected, # This is the total computed NOW (with ordered_product.price)
}
data['form-TOTAL_FORMS'] = c
data['form-INITIAL_FORMS'] = c
data['form-MAX_NUM_FORMS'] = 0
formset = self._get_edit_multiple_form_class()(request, data)
records = []
for i,el in enumerate(querySet):
form = formset[map_info[el.pk]['formset_index']]
total = map_info[el.pk]['ordered_total']
form.fields['ordered_amount'].widget.attrs = {
'class' : 'amount',
'step' : el.ordered_product.gasstock.step or 1,
'minimum_amount' : el.ordered_product.gasstock.minimum_amount or 1,
'eur_chan' : ["", "alert"][bool(el.has_changed)],
'req_conf' : ["alert", ""][bool(el.is_confirmed)],
's_url' : el.supplier.urn,
'p_url' : el.stock.urn,
}
#'p_url' : el.product.urn,
records.append({
'id' : "%s %s %s %s %s" % (el.pk, form['id'], form['gm_id'], form['gsop_id'], form['ordered_price']),
'order' : el.order.pk,
'supplier' : el.supplier,
'product' : el.product,
'price' : el.ordered_product.order_price,
'price_changed' : not el.has_changed,
                'ordered_amount' : form['ordered_amount'], # field initialized with the minimum amount and carrying the step attribute
'ordered_total' : total,
'field_enabled' : form['enabled'],
'order_confirmed' : el.is_confirmed,
'order_urn' : el.order.urn,
})
#'description' : el.product.description,
#return records, records, {}
return formset, records, {}
def _set_records(self, request, records):
pass
def get_response(self, request, resource_type, resource_id, args):
try:
rv = super(Block, self).get_response(request, resource_type, resource_id, args)
except NotImplementedError:
# Not implemented args are implemented in this method
pass
if args == CONFIRM:
for gmo in self.resource.basket:
log.debug(u"Sto confermando un ordine gasista(%s)" % gmo)
gmo.confirm()
gmo.save()
#IMPORTANT: unset args to compute table results!
args = self.KW_DATA
elif args == CREATE_PDF:
rv = self._create_pdf()
elif args == SENDME_PDF:
rv = self._send_email_logged()
#TODO FIXME: ugly patch to fix AFTERrecords.append( 6
if args == self.KW_DATA:
from gasistafelice.lib.views_support import prepare_datatables_queryset, render_datatables
querySet = self._get_resource_list(request)
#columnIndexNameMap is required for correct sorting behavior
columnIndexNameMap = self.COLUMN_INDEX_NAME_MAP
#path to template used to generate json (optional)
jsonTemplatePath = 'blocks/%s/data.json' % self.BLOCK_NAME
querySet, dt_params = prepare_datatables_queryset(request, querySet, columnIndexNameMap)
#TODO FIXME: AFTER 6
formset, records, moreData = self._get_records(request, querySet)
rv = render_datatables(request, records, dt_params, jsonTemplatePath)
return rv
def _send_email_logged(self):
try:
#WAS: to = self.request.user.email
#WAS: self.resource.send_email([to],None, 'Order Email me', self.request.user)
self.resource.send_email_to_gasmember(None

| gaamy/INF4215_IA | tp1/breadthfirst_search.py | Python | gpl-2.0 | 634 | 0.003155 |

# Breadth-first Search
#
# Author: Michel Gagnon
# michel.gagnon@polytml.ca
from node import *
from state import *
def breadthfirst_search(initialState):
    frontier = [Node(initialState)]
visited = set()
while frontier:
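        # pop(0) dequeues the oldest node (FIFO), which yields breadth-first order.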
node = frontier.pop(0)
visited.add(node.state)
# node.state.show()
# print '----------------'
if node.state.isGoal():
node.state.show()
return node
elif node.isRepeated():
continue
else:
            frontier = frontier + [child for child in node.expand() if child.state not in visited]
return None

| grahamhayes/designate | designate/api/v2/controllers/tsigkeys.py | Python | apache-2.0 | 4,349 | 0 |

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from oslo_log import log as logging
from designate import utils
from designate.api.v2.controllers import rest
from designate.objects import TsigKey
from designate.objects.adapters import DesignateAdapter
LOG = logging.getLogger(__name__)
class TsigKeysController(rest.RestController):
SORT_KEYS = ['created_at', 'id', 'updated_at', 'name']
@pecan.expose(template='json:', content_type='application/json')
@utils.validate_uuid('tsigkey_id')
def get_one(self, tsigkey_id):
"""Get TsigKey"""
request = pecan.request
context = request.environ['context']
return DesignateAdapter.render(
'API_v2',
self.central_api.get_tsigkey(context, tsigkey_id),
request=request)
@pecan.expose(template='json:', content_type='application/json')
def get_all(self, **params):
"""List all TsigKeys"""
request = pecan.request
context = request.environ['context']
# Extract the pagination params
marker, limit, sort_key, sort_dir = utils.get_paging_params(
params, self.SORT_KEYS)
# Extract any filter params
accepted_filters = ('name', 'algorithm', 'scope')
criterion = self._apply_filter_params(
params, accepted_filters, {})
return DesignateAdapter.render(
'API_v2',
self.central_api.find_tsigkeys(
context, criterion, marker, limit, sort_key, sort_dir),
request=request)
@pecan.expose(template='json:', content_type='application/json')
def post_all(self):
"""Create TsigKey"""
request = pecan.request
response = pecan.response
context = request.environ['context']
body = request.body_dict
tsigkey = DesignateAdapter.parse('API_v2', body, TsigKey())
tsigkey.validate()
# Create the tsigkey
tsigkey = self.central_api.create_tsigkey(
context, tsigkey)
tsigkey = DesignateAdapter.render('API_v2', tsigkey, request=request)
response.headers['Location'] = tsigkey['links']['self']
response.status_int = 201
# Prepare and return the response body
return tsigkey
@pecan.expose(template='json:', content_type='application/json')
@pecan.expose(template='json:', content_type='application/json-patch+json')
@utils.validate_uuid('tsigkey_id')
def patch_one(self, tsigkey_id):
"""Update TsigKey"""
request = pecan.request
context = request.environ['context']
body = request.body_dict
response = pecan.response
if request.content_type == 'application/json-patch+json':
            raise NotImplementedError('json-patch not implemented')
# Fetch the existing tsigkey entry
tsigkey = self.central_api.get_tsigkey(context, tsigkey_id)
tsigkey = DesignateAdapter.parse('API_v2', body, tsigkey)
# Validate the new set of data
tsigkey.validate()
# Update and persist the resource
tsigkey = self.central_api.update_tsigkey(context, tsigkey)
response.status_int = 200
return DesignateAdapter.render('API_v2', tsigkey, request=request)
@pecan.expose(template=None, content_type='application/json')
@utils.validate_uuid('tsigkey_id')
def delete_one(self, tsigkey_id):
"""Delete TsigKey"""
request = pecan.request
response = pecan.response
context = request.environ['context']
self.central_api.delete_tsigkey(context, tsigkey_id)
response.status_int = 204
# NOTE: This is a hack and a half.. But Pecan needs it.
return ''

| suziesparkle/wagtail | wagtail/vendor/django-treebeard/treebeard/tests/test_treebeard.py | Python | bsd-3-clause | 90,877 | 0.000044 |

# -*- coding: utf-8 -*-
"""Unit/Functional tests"""
from __future__ import with_statement, unicode_literals
import datetime
import os
import sys
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.db.models import Q
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
import pytest
from treebeard import numconv
from treebeard.admin import admin_factory
from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant,\
PathOverflow, MissingNodeOrderBy
from treebeard.forms import movenodeform_factory
from treebeard.templatetags.admin_tree import get_static_url
from treebeard.tests import models
BASE_DATA = [
{'data': {'desc': '1'}},
{'data': {'desc': '2'}, 'children': [
{'data': {'desc': '21'}},
{'data': {'desc': '22'}},
{'data': {'desc': '23'}, 'children': [
{'data': {'desc': '231'}},
]},
{'data': {'desc': '24'}},
]},
{'data': {'desc': '3'}},
{'data': {'desc': '4'}, 'children': [
{'data': {'desc': '41'}},
]}]
UNCHANGED = [
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
def _prepare_db_test(request):
case = TestCase(methodName='__init__')
case._pre_setup()
request.addfinalizer(case._post_teardown)
return request.param
@pytest.fixture(scope='function',
params=models.BASE_MODELS + models.PROXY_MODELS)
def model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.BASE_MODELS)
def model_without_proxy(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.UNICODE_MODELS)
def model_with_unicode(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.SORTED_MODELS)
def sorted_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.RELATED_MODELS)
def related_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.MP_SHORTPATH_MODELS)
def mpshort_model(request):
    return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeShortPath])
def mpshortnotsorted_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeAlphabet])
def mpalphabet_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeSortedAutoNow])
def mpsortedautonow_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeSmallStep])
def mpsmallstep_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestManyToManyWithUser])
def mpm2muser_model(request):
return _prepare_db_test(request)
class TestTreeBase(object):
def got(self, model):
if model in [models.NS_TestNode, models.NS_TestNode_Proxy]:
# this slows down nested sets tests quite a bit, but it has the
# advantage that we'll check the node edges are correct
d = {}
for tree_id, lft, rgt in model.objects.values_list('tree_id',
'lft',
'rgt'):
d.setdefault(tree_id, []).extend([lft, rgt])
for tree_id, got_edges in d.items():
assert len(got_edges) == max(got_edges)
good_edges = list(range(1, len(got_edges) + 1))
assert sorted(got_edges) == good_edges
return [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
def _assert_get_annotated_list(self, model, expected, parent=None):
got = [
(obj[0].desc, obj[1]['open'], obj[1]['close'], obj[1]['level'])
for obj in model.get_annotated_list(parent)
]
assert expected == got
class TestEmptyTree(TestTreeBase):
def test_load_bulk_empty(self, model):
ids = model.load_bulk(BASE_DATA)
got_descs = [obj.desc
for obj in model.objects.filter(id__in=ids)]
expected_descs = [x[0] for x in UNCHANGED]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model) == UNCHANGED
def test_dump_bulk_empty(self, model):
assert model.dump_bulk() == []
def test_add_root_empty(self, model):
model.add_root(desc='1')
expected = [('1', 1, 0)]
assert self.got(model) == expected
def test_get_root_nodes_empty(self, model):
got = model.get_root_nodes()
expected = []
assert [node.desc for node in got] == expected
def test_get_first_root_node_empty(self, model):
got = model.get_first_root_node()
assert got is None
def test_get_last_root_node_empty(self, model):
got = model.get_last_root_node()
assert got is None
def test_get_tree(self, model):
got = list(model.get_tree())
assert got == []
def test_get_annotated_list(self, model):
expected = []
self._assert_get_annotated_list(model, expected)
class TestNonEmptyTree(TestTreeBase):
@classmethod
def setup_class(cls):
for model in models.BASE_MODELS:
model.load_bulk(BASE_DATA)
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.BASE_MODELS)
class TestClassMethods(TestNonEmptyTree):
def test_load_bulk_existing(self, model):
# inserting on an existing node
node = model.objects.get(desc='231')
ids = model.load_bulk(BASE_DATA, node)
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 4),
('1', 4, 0),
('2', 4, 4),
('21', 5, 0),
('22', 5, 0),
('23', 5, 1),
('231', 6, 0),
('24', 5, 0),
('3', 4, 0),
('4', 4, 1),
('41', 5, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
expected_descs = ['1', '2', '21', '22', '23', '231', '24',
'3', '4', '41']
got_descs = [obj.desc for obj in model.objects.filter(id__in=ids)]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model) == expected
def test_get_tree_all(self, model):
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
assert got == UNCHANGED
def test_dump_bulk_all(self, model):
assert model.dump_bulk(keep_ids=False) == BASE_DATA
def test_get_tree_node(self, model):
node = model.objects.get(desc='231')
model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our node object
node = model.objects.get(pk=node.pk)
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree(node)]
expected = [('231', 3, 4),
('1', 4, 0),
('2', 4, 4),
('21', 5, 0),
('22', 5, 0),
('23', 5, 1),
('231', 6, 0),
('24', 5, 0),
('3', 4, 0),
('4', 4, 1),
('41', 5, 0)]
assert got == expected
def test_get_tree_leaf(self, model):
node = model.objects.get(desc='1')
assert 0 == n

| wq/xlsform-converter | tests/files/input_types/models.py | Python | mit | 2,324 | 0 |

from django.contrib.gis.db import models
class InputTypes(models.Model):
int_field = models.IntegerField(
null=True,
blank=True,
verbose_name="Integer field",
help_text="Enter an integer number.",
)
dec_field = models.FloatField(
null=True,
blank=True,
verbose_name="Decimal field",
help_text="Enter a decimal number.",
)
text_field = models.TextField(
null=True,
blank=True,
verbose_name="Text field",
help_text="Enter some text.",
)
char_field = models.CharField(
max_length=5,
null=True,
        blank=True,
verbose_name="Char field",
help_text="Enter some text.",
)
point_field = models.PointField(
srid=4326,
verbose_name="Point field",
help_text="Enter a point.",
)
linestring_field = models.LineStringField(
srid=4326,
null=True,
blank=True,
verbose_name="Line string field",
help_text="Enter a line.",
)
polygon_field = models.PolygonField(
srid=4326,
null=True,
blank=True,
verbose_name="Polygon field",
help_text="Enter a polygon.",
)
date_field = models.DateField(
null=True,
blank=True,
verbose_name="Date field",
help_text="Enter a date.",
)
time_field = models.TimeField(
null=True,
blank=True,
verbose_name="Time field",
help_text="Enter a time.",
)
datetime_field = models.DateTimeField(
null=True,
blank=True,
verbose_name="Date+time field",
help_text="Enter a date and a time.",
)
image_field = models.ImageField(
upload_to="inputtypes",
verbose_name="Image field",
help_text="Add an image.",
)
audio_field = models.FileField(
upload_to="inputtypes",
null=True,
blank=True,
verbose_name="Audio field",
help_text="Add an audio file.",
)
video_field = models.FileField(
upload_to="inputtypes",
null=True,
blank=True,
verbose_name="Video field",
help_text="Add a video.",
)
class Meta:
verbose_name = "input_types"
verbose_name_plural = "inputtypes"
|
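A minimal usage sketch for the InputTypes model above, assuming a configured GeoDjango project with the app installed; the import path, field values, and app label are illustrative, not part of the original file:

# Hypothetical usage; `myapp` stands in for wherever InputTypes is installed.
from django.contrib.gis.geos import Point
from myapp.models import InputTypes

row = InputTypes.objects.create(
    int_field=3,
    dec_field=1.5,
    char_field="abcde",  # at most five characters (max_length=5)
    point_field=Point(151.2, -33.9, srid=4326),  # Point takes (lon, lat)
    image_field="inputtypes/example.png",  # path of an already-uploaded file
)
print(row.pk, row.point_field.coords)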
gradiuscypher/internet_illithid
|
mirror_shield/endpoints/filestore.py
|
Python
|
mit
| 2,550
| 0.001176
|
import traceback
import requests
import time
import imghdr
from os.path import exists, isfile, join, isdir
from os import makedirs, listdir, walk
from flask import Blueprint, request, send_from_directory, render_template
filestore = Blueprint('callback', __name__)
@filestore.route('/clone', methods=["POST"])
def clone():
try:
# Grab the JSON content in post
content = request.get_json()
url = content['url']
url_filename = url.split("/")[-1]
sender = content['sender']
source = content['source']
timestamp = int(time.time())
filename = "files/{}/{}/{}-{}".format(source, sender, timestamp, url_filename)
# Check if the user's folder exists
if not exists("files/{}/{}".format(source, sender)):
makedirs("files/{}/{}".format(source, sender))
# Download the file and save to the user's directory
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r:
f.write(chunk)
return filename, 200
except:
print(traceback.format_exc())
return "Fail", 500
@filestore.route('/files/<path:path>', methods=["GET"])
def files(path):
return send_from_directory('files', path)
@filestore.route('/src/<path:path>', methods=["GET"])
def src(path):
print("PATH IS {}".format(path))
return send_from_directory('src', path)
@filestore.route('/', methods=["GET"])
def services():
services = []
filepath = "files/"
    for f in listdir(filepath):
if isdir(join(filepath, f)):
services.append(f)
return render_template('servicelist.html', services=services)
@filestore.route('/<service>/userlist', methods=["GET"])
def userlist(service):
users = []
filepath = "files/{}".format(service)
for f in listdir(filepath):
if isdir(join(filepath, f)):
users.append(f)
return render_template('userlist.html', users=users, service=service)
@filestore.route('/<service>/gallery/<user>', methods=["GET"])
def gallery(user, service):
filepath = "files/{}/{}".format(service, user)
images = []
other = []
for f in listdir(filepath):
if isfile(join(filepath, f)):
if imghdr.what(join(filepath, f)) is not None:
images.append(f)
else:
other.append(f)
return render_template('gallery.html', title="Gallery", images=images, filepath=filepath, otherfiles=other)
|
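A hedged usage sketch for the /clone endpoint above, assuming the blueprint is registered at the site root and served on localhost:5000 (both assumptions):

import requests

resp = requests.post(
    "http://localhost:5000/clone",
    json={
        "url": "http://example.com/cat.png",  # remote file to mirror
        "sender": "alice",    # becomes the per-user folder name
        "source": "discord",  # becomes the per-service folder name
    },
)
print(resp.status_code, resp.text)  # 200 and the stored path on success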
toddpalino/kafka-tools
|
kafka/tools/protocol/responses/__init__.py
|
Python
|
apache-2.0
| 2,328
| 0.000859
|
import abc
import pprint
import six
def _decode_plain_type(value_type, buf):
if value_type == 'int8':
return buf.getInt8()
elif value_type == 'int16':
return buf.getInt16()
    elif value_type == 'int32':
return buf.getInt32()
elif value_type == 'int64':
return buf.getInt64()
elif value_type == 'string':
val_len = buf.getInt16()
return None if val_len == -1 else buf.get(val_len).decode("utf-8")
elif value_type == 'bytes':
val_len = buf.getInt32()
        return None if val_len == -1 else buf.get(val_len)
elif value_type == 'boolean':
return buf.getInt8() == 1
else:
raise NotImplementedError("Reference to non-implemented type in schema: {0}".format(value_type))
def _decode_array(array_schema, buf):
array_len = buf.getInt32()
if array_len == -1:
return None
if isinstance(array_schema, six.string_types):
return [_decode_plain_type(array_schema, buf) for i in range(array_len)]
else:
return [_decode_sequence(array_schema, buf) for i in range(array_len)]
def _decode_sequence(sequence_schema, buf):
val = {}
for entry in sequence_schema:
if entry['type'].lower() == 'array':
val[entry['name']] = _decode_array(entry['item_type'], buf)
else:
val[entry['name']] = _decode_plain_type(entry['type'].lower(), buf)
return val
@six.add_metaclass(abc.ABCMeta)
class BaseResponse(): # pragma: no cover
@abc.abstractproperty
def schema(self):
pass
@classmethod
def from_bytebuffer(cls, correlation_id, buf):
seq_obj = _decode_sequence(cls.schema, buf)
rv = cls(seq_obj)
rv.correlation_id = correlation_id
return rv
def __init__(self, sequence_obj):
self._response = sequence_obj
def __hash__(self):
return id(self)
def __str__(self):
pp = pprint.PrettyPrinter(indent=4)
return pp.pformat(self._response)
def __len__(self):
return len(self._response)
def __contains__(self, k):
return k in self._response
def __getitem__(self, k):
return self._response[k]
    def __setitem__(self, k, v):
raise NotImplementedError
def __delitem__(self, k):
raise NotImplementedError
|
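A self-contained sketch of how _decode_sequence above walks a buffer. TinyBuffer is a stand-in implementing only the methods the decoder calls; it is not part of kafka-tools:

import struct

class TinyBuffer(object):
    """Stand-in byte buffer: big-endian reads with an advancing offset."""
    def __init__(self, data):
        self._data, self._pos = data, 0
    def _take(self, fmt, size):
        val = struct.unpack_from(fmt, self._data, self._pos)[0]
        self._pos += size
        return val
    def getInt8(self):
        return self._take('>b', 1)
    def getInt16(self):
        return self._take('>h', 2)
    def getInt32(self):
        return self._take('>i', 4)
    def getInt64(self):
        return self._take('>q', 8)
    def get(self, n):
        val = self._data[self._pos:self._pos + n]
        self._pos += n
        return val

schema = [{'name': 'error_code', 'type': 'int16'},
          {'name': 'name', 'type': 'string'}]
buf = TinyBuffer(struct.pack('>h', 0) + struct.pack('>h', 2) + b'ok')
print(_decode_sequence(schema, buf))  # {'error_code': 0, 'name': 'ok'}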
IEEEDTU/CMS
|
Course/views/CourseCurriculum.py
|
Python
|
mit
| 2,775
| 0
|
from django.core import serializers
from django.http import HttpResponse, JsonResponse
from Course.models import *
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_GET
import json
@csrf_exempt
@require_POST
def addCourseCurriculum(request):
response_data = {}
try:
C = CourseCurriculum.objects.addCourseCurriculum(request.POST)
except Exception as e:
response_data['success'] = '0'
response_data['exception'] = str(e)
else:
response_data['success'] = '1'
data = serializers.serialize('json', [C, ])
response_data["coursecurriculum"] = json.loads(data)
return JsonResponse(response_data)
@csrf_exempt
@require_POST
def editCourseCurriculum(request):
response_data = {}
try:
C = CourseCurriculum.objects.editCourseCurriculum(request.POST)
except Exception as e:
response_data['success'] = '0'
response_data['exception'] = str(e)
else:
response_data['success'] = '1'
data = serializers.serialize('json', [C, ])
        response_data["coursecurriculum"] = json.loads(data)
return JsonResponse(response_data)
@csrf_exempt
@require_POST
def deleteCourseCurriculum(request):
response_data = {}
try:
C = CourseCurriculum.objects.deleteCourseCurriculum(request.POST)
except Exception as e:
response_data['success'] = '0'
response_data['exception'] = str(e)
else:
response_data['success'] = '1'
data = serializers.serialize('json', [C, ])
response_data["coursecurriculum"] = json.loads(data)
return JsonResponse(response_data)
@csrf_exempt
@require_GET
def getCourseCurriculum(request):
response_data = {}
try:
C = CourseCurriculum.objects.getCourseCurriculum(request.GET)
except Exception as e:
response_data["success"] = 0
response_data['exception'] = str(e)
else:
response_data["success"] = 1
data = serializers.serialize('json', [C.instructor, ])
response_data["coursecurriculum"] = json.loads(data)
return JsonResponse(response_data)
@csrf_exempt
@require_GET
def retrieveCourseCurriculum(request):
response_data = {}
try:
C = CourseCurriculum.objects.retrieveCourseCurriculum(request.GET)
except Exception as e:
response_data['success'] = '0'
response_data['exception'] = str(e)
else:
response_data['success'] = '1'
global data
try:
data = serializers.serialize('json', C)
except Exception as e:
data = serializers.serialize('json', [C, ])
response_data["coursecurriculum"] = json.loads(data)
return JsonResponse(response_data)
|
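The three POST views above repeat the same try/serialize/respond pattern and differ only in the manager method they call; a hedged refactor sketch (the helper name is invented, everything else reuses the module's own imports):

def _curriculum_post_view(action):
    @csrf_exempt
    @require_POST
    def view(request):
        response_data = {}
        try:
            # Dispatch to the named manager method, e.g. addCourseCurriculum.
            C = getattr(CourseCurriculum.objects, action)(request.POST)
        except Exception as e:
            response_data['success'] = '0'
            response_data['exception'] = str(e)
        else:
            response_data['success'] = '1'
            data = serializers.serialize('json', [C, ])
            response_data["coursecurriculum"] = json.loads(data)
        return JsonResponse(response_data)
    return view

addCourseCurriculum = _curriculum_post_view('addCourseCurriculum')
editCourseCurriculum = _curriculum_post_view('editCourseCurriculum')
deleteCourseCurriculum = _curriculum_post_view('deleteCourseCurriculum')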
zhuango/python
|
sklearnLearning/statisticalAndSupervisedLearning/adaboost.py
|
Python
|
gpl-2.0
| 2,852
| 0.002454
|
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
|
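A short follow-on sketch for the script above: the staged error curves end at the full ensembles, which can be cross-checked with plain predict() calls (note accuracy_score takes y_true first):

final_real_err = 1. - accuracy_score(y_test, bdt_real.predict(X_test))
final_discrete_err = 1. - accuracy_score(y_test, bdt_discrete.predict(X_test))
print("final test error - SAMME.R: %.3f, SAMME: %.3f"
      % (final_real_err, final_discrete_err))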
manazag/hopper.pw
|
hopperpw/hopperpw/settings/test.py
|
Python
|
bsd-3-clause
| 229
| 0.004367
|
# coding=utf-8
from __future__ import absolute_import
from .base import *
# ######### IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "
|
django.db.backends.sqlite3",
"NAME": ":memory:",
},
}
|
chrys87/orca-beep
|
test/keystrokes/gtk-demo/role_table.py
|
Python
|
lgpl-2.1
| 3,344
| 0.001794
|
#!/usr/bin/python
"""Test of table output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("End"))
sequence.append(KeyComboAction("Up"))
sequence.append(KeyComboAction("<Shift>Right"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Return"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"1. Table Where Am I",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 3 bottles of coke'",
" VISIBLE: '3 bottles of coke', cursor=1",
"SPEECH OUTPUT: 'table.'",
"SPEECH OUTPUT: 'Number.'",
"SPEECH OUTPUT: 'table cell.'",
"SPEECH OUTPUT: '3.'",
"SPEECH OUTPUT: 'column 1 of 3'",
"SPEECH OUTPUT: 'row 1 of 5.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Next row",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles'",
" VISIBLE: '5 packages of noodles
|
', cursor=1",
"SPEECH OUTPUT: '5 packages of noodles.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"3. Table Where Am I (again)",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles'",
" VISIBLE: '5 packages of noodles', cursor=1",
"SPEECH OUTPUT: 'table.'",
"SPEECH OUTPUT: 'Number.'",
"SPEECH OUTPUT: 'table cell.'",
"SPEECH OUTPUT: '5.'",
"SPEECH OUTPUT: 'column 1 of 3'",
"SPEECH OUTPUT: 'row 2 of 5.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("F11"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"4. Turn row reading off",
["BRAILLE LINE: 'Speak cell'",
" VISIBLE: 'Speak cell', cursor=0",
"SPEECH OUTPUT: 'Speak cell'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"5. Table Right to the Product column in the packages of noodles row",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles'",
" VISIBLE: '5 packages of noodles', cursor=1",
"BRAILLE LINE: 'gtk-demo application Shopping list frame table Product column header packages of noodles table cell'",
" VISIBLE: 'packages of noodles table cell', cursor=1",
"SPEECH OUTPUT: 'Product column header packages of noodles.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"6. Table up to bottles of coke",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Product column header bottles of coke table cell'",
" VISIBLE: 'bottles of coke table cell', cursor=1",
"SPEECH OUTPUT: 'bottles of coke.'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
failys/CAIRIS
|
cairis/test/test_Attacker.py
|
Python
|
apache-2.0
| 4,335
| 0.009919
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import json
from subprocess import call
import cairis.core.BorgFactory
from cairis.core.Borg import Borg
from cairis.core.EnvironmentParameters import EnvironmentParameters
from cairis.core.RoleParameters import RoleParameters
from cairis.core.AttackerParameters import AttackerParameters
from cairis.core.AttackerEnvironmentProperties import AttackerEnvironmentProperties
__author__ = 'Shamal Faily'
class AttackerTest(unittest.TestCase):
def setUp(self):
call([os.environ['CAIRIS_CFG_DIR'] + "/initdb.sh"])
cairis.core.BorgFactory.initialise()
f = open(os.environ['CAIRIS_SRC'] + '/test/attackers.json')
d = json.load(f)
f.close()
ienvs = d['environments']
iep = EnvironmentParameters(ienvs[0]["theName"],ienvs[0]["theShortCode"],ienvs[0]["theDescription"])
iep1 = EnvironmentParameters(ienvs[1]["theName"],ienvs[1]["theShortCode"],ienvs[1]["theDescription"])
b = Borg()
b.dbProxy.addEnvironment(iep)
b.dbProxy.addEnvironment(iep1)
iRoles = d['roles']
irp = RoleParameters(iRoles[0]["theName"], iRoles[0]["theType"], iRoles[0]["theShortCode"], iRoles[0]["theDescription"],[])
b.dbProxy.addRole(irp)
self.iAttackers = d['attackers']
def testAttacker(self):
iatkeps = [AttackerEnvironmentProperties(self.iAttackers[0]["theEnvironmentProperties"][0]["theName"],self.iAttackers[0]["theEnvironmentProperties"][0]["theRoles"],self.iAttackers[0]["theEnvironmentProperties"][0]["theMotives"],self.iAttackers[0]["theEnvironmentProperties"][0]["theCapabilities"]),AttackerEnvironmentProperties(self.iAttackers[0]["theEnvironmentProperties"][1]["theName"],self.iAttackers[0]["theEnvironmentProperties"][1]["theRoles"],self.iAttackers[0]["theEnvironmentProperties"][1]["theMotives"],self.iAttackers[0]["theEnvironmentProperties"][1]["theCapabilities"])]
iatk = AttackerParameters(self.iAttackers[0]["theName"], self.iAttackers[0]["theDescription"], self.iAttackers[0]["theImage"],[],iatkeps)
b = Borg()
b.dbProxy.addAttacker(iatk)
oAttackers = b.dbProxy.getAttackers()
o = oAttackers[self.iAttackers[0]["theName"]]
self.assertEqual(iatk.name(), o.name())
self.assertEqual(iatk.description(),o.description())
self.assertEqual(iatk.image(),o.image())
oatkeps = o.environmentProperties()
self.assertEqual(iatkeps[0].name(), oatkeps[0].name())
    self.assertEqual(str(iatkeps[0].roles()[0]), str(oatkeps[0].roles()[0]))
self.assertEqual(str(iatkeps[0].roles()[0]), o.roles('Day','')[0])
self.assertEqual(iatkeps[0].roles(), list(o.roles('','Maximise')))
self.assertEqual(str(iatkeps[0].motives()[0]), str(oatkeps[0].motives()[0]))
self.assertEqual(str(iatkeps[0].motives()[0]), str(o.motives('Day','')[0]))
self.assertEqual(iatkeps[0].motives(), list(o.motives('','Maximise')))
    self.assertEqual(str(iatkeps[0].capabilities()[0][0]), str(oatkeps[0].capabilities()[0][0]))
self.assertEqual(str(iatkeps[0].capabilities()[0][1]), str(oatkeps[0].capabilities()[0][1]))
self.assertEqual(iatkeps[0].capabilities()[0][0], o.capability('Day','')[0][0])
self.assertEqual(iatkeps[0].capabilities()[0][0], list(o.capability('','Maximise'))[0][0])
iatk.theName = 'Updated name'
iatk.setId(o.id())
b.dbProxy.updateAttacker(iatk)
oAttackers = b.dbProxy.getAttackers()
o = oAttackers["Updated name"]
self.assertEqual(o.name(),'Updated name')
b.dbProxy.deleteAttacker(o.id())
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
kevin8909/xjerp
|
openerp/addons/crm/crm_lead.py
|
Python
|
agpl-3.0
| 53,864
| 0.005607
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_status.base_stage import base_stage
import crm
from datetime import datetime
from operator import itemgetter
from openerp.osv import fields, osv, orm
import time
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
from base.res.res_partner import format_address
CRM_LEAD_FIELDS_TO_MERGE = ['name',
'partner_id',
'channel_id',
'company_id',
'country_id',
'section_id',
'state_id',
'stage_id',
'type_id',
'user_id',
'title',
'city',
'contact_name',
'description',
'email',
'fax',
'mobile',
'partner_name',
'phone',
'probability',
'planned_revenue',
'street',
'street2',
'zip',
'create_date',
'date_action_last',
'date_action_next',
'email_from',
'email_cc',
'partner_name']
CRM_LEAD_PENDING_STATES = (
crm.AVAILABLE_STATES[2][0], # Cancelled
crm.AVAILABLE_STATES[3][0], # Done
crm.AVAILABLE_STATES[4][0], # Pending
)
class crm_lead(base_stage, format_address, osv.osv):
""" CRM Lead Case """
_name = "crm.lead"
_description = "Lead/Opportunity"
_order = "priority,date_action,id desc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'crm.mt_lead_create': lambda self, cr, uid, obj, ctx=None: obj['state'] in ['new', 'draft'],
'crm.mt_lead_won': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
'crm.mt_lead_lost': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'cancel',
},
'stage_id': {
'crm.mt_lead_stage': lambda self, cr, uid, obj, ctx=None: obj['state'] not in ['new', 'draft', 'cancel', 'done'],
},
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('type') and not context.get('default_type'):
context['default_type'] = vals.get('type')
if vals.get('section_id') and not context.get('default_section_id'):
context['default_section_id'] = vals.get('section_id')
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
return super(crm_lead, self).create(cr, uid, vals, context=create_context)
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return self._resolve_section_id_from_context(cr, uid, context=context) or False
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
section_id = self._get_default_section_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], section_id, [('state', '=', 'draft')], context=context)
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_name = context['default_section_id']
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=section_name, context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
def _resolve_type_from_context(self, cr, uid, context=None):
""" Returns the type (lead or opportunity) from the type context
key. Returns None if it cannot be resolved.
"""
if context is None:
context = {}
return context.get('default_type')
    def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('crm.case.stage')
order = stage_obj._order
        # lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve section_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
# - OR ('section_ids', '=', section_id), ('fold', '=', False) if section_id: add section columns that are not folded
search_domain = []
section_id = self._resolve_section_id_from_context(cr, uid, context=context)
if section_id:
search_domain += ['|', ('section_ids', '=', section_id)]
search_domain += [('id', 'in', ids)]
else:
search_domain += ['|', ('id', 'in', ids), ('case_default', '=', True)]
# retrieve type from the context (if set: choose 'type' or 'both')
type = self._resolve_type_from_context(cr, uid, context=context)
if type:
search_domain += ['|', ('type', '=', type), ('type', '=', 'both')]
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(crm_lead,self).fields_view_get(cr, user, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
res['arch'] = self.fields_view_get_address(cr, user, res['arch'], context=context)
return res
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
:return dict: difference between current date and log date
"""
cal_obj = self.pool.get('resource.calendar')
res_obj = self.pool.get('resource.resource')
res = {}
for lead in self.browse(cr, uid, ids, context=context):
for field in fields:
res[lead.id] = {}
duration = 0
ans = False
if field == 'day_open':
if lead.date_open:
date_create = datetime.strptime(lead.create_date, "%Y-%m-%d %H:%M:%S")
date_open = datetime.strptime(lead.date_open, "%Y-%m-%d %H:%M:%S")
ans = date_open - date_create
                        date_until = lead.date_open
|
teddyrendahl/powermate
|
tests/conftest.py
|
Python
|
apache-2.0
| 1,119
| 0.008937
|
##############
#  Standard  #
##############
import io
import logging
import tempfile
##############
# External #
##############
import pytest
##############
# Module #
##############
import powermate
#Enable the logging level to be set from the command line
def pytest_addoption(parser):
parser.addoption('--log', action='store', default='INFO',
help='Set the level of the log')
#Fixture to automatically instantiate logging setup
@pytest.fixture(scope='session', autouse=True)
def set_level(pytestconfig):
#Read user input logging level
    log_level = getattr(logging, pytestconfig.getoption('--log'), None)
#Report invalid logging level
if not isinstance(log_level, int):
raise ValueError("Invalid log level : {}".format(log_level))
#Create basic configuration
logging.basicConfig(level=log_level, format='%(message)s')
@pytest.fixture(scope='module')
def pseudo_socket():
with tempfile.NamedTemporaryFile() as tmp:
s = powermate.event.Socket(tmp.name)
s._input = io.BytesIO()
s._output = io.BytesIO()
yield s
|
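A hedged usage sketch: a test module relying on the fixtures above, run with e.g. pytest --log DEBUG. The _output attribute is taken from the conftest itself, where it is set to an io.BytesIO:

def test_socket_output_starts_empty(pseudo_socket):
    # Nothing has been written yet, so the fake output stream is empty.
    assert pseudo_socket._output.getvalue() == b''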
manhhomienbienthuy/pythondotorg
|
sponsors/migrations/0057_auto_20211026_1529.py
|
Python
|
apache-2.0
| 416
| 0
|
# Generated by Django 2.2.24 on 2021-10-26 15:29
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0056_textasset'),
]
operations = [
migrations.AlterField(
model_name='genericasset',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
]
|
odoousers2014/odoo
|
addons/l10n_co/__openerp__.py
|
Python
|
agpl-3.0
| 1,792
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) David Arnold (devCO).
# Author David Arnold (devCO), dar@devco.co
# Co-Authors Juan Pablo Aries (devCO), jpa@devco.co
# Hector Ivan Valencia Muñoz (TIX SAS)
# Nhomar Hernandez (Vauxoo)
# Humberto Ochoa (Vauxoo)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Colombian - Accounting',
'version': '0.8',
'category': 'Localization/Account Charts',
'description': 'Colombian Accounting and Tax Preconfiguration',
'author': 'David Arnold BA HSG (devCO)',
'depends': [
'account',
'base_vat',
'account_chart',
],
'data': [
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account_chart_template.xml',
'data/account.tax.template.csv',
'wizard/account_wizard.xml',
],
'demo': [],
'installable': True,
}
|
guilhermedallanol/dotfiles
|
vim/plugged/vial-http/server/dswf.py
|
Python
|
mit
| 2,837
| 0.003525
|
"""Thin wrapper around Werkzeug because Flask and Bottle
do not play nicely with async uwsgi"""
import json
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.utils import redirect
from covador import ValidationDecorator, schema, list_schema
from covador.utils import merge_dicts, parse_qs
from covador.errors import error_to_json
class AppRequest(Request):
def after(self, func):
try:
handlers = self._after_request_handlers
except AttributeError:
handlers = self._after_request_handlers = []
handlers.append(func)
return func
class App:
def __init__(self):
self._url_map = Map(strict_slashes=False)
def route(self, rule, **kwargs):
def decorator(func):
kwargs['endpoint'] = func
self._url_map.add(Rule(rule, **kwargs))
return func
return decorator
def _dispatch(self, request):
adapter = self._url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
return endpoint(request, **values)
except HTTPException as e:
return e
    def __call__(self, env, sr):
request = AppRequest(env)
response = self._dispatch(request)
after_handlers = getattr(request, '_after_request_handlers', None)
if after_handlers:
for h in after_handlers:
response = h(response) or response
return response(env, sr)
def error_handler(ctx): # pragma: no cover
return Response(error_to_json(ctx.exception), mimetype='application/json', status=400)
def get_qs(request):
try:
return request._covador_qs
except AttributeError:
qs = request._covador_qs = parse_qs(request.environ.get('QUERY_STRING', ''))
return qs
def get_form(request):
try:
return request._covador_form
except AttributeError:
form = request._covador_form = parse_qs(request.get_data(parse_form_data=False))
return form
_query_string = lambda request, *_args, **_kwargs: get_qs(request)
_form = lambda request, *_args, **_kwargs: get_form(request)
_params = lambda request, *_args, **_kwargs: merge_dicts(get_qs(request), get_form(request))
_rparams = lambda request, *_args, **kwargs: kwargs
_json = lambda request, *_args, **_kwargs: json.loads(request.get_data(parse_form_data=False))
query_string = ValidationDecorator(_query_string, error_handler, list_schema)
form = ValidationDecorator(_form, error_handler, list_schema)
params = ValidationDecorator(_params, error_handler, list_schema)
rparams = ValidationDecorator(_rparams, error_handler, list_schema)
json_body = ValidationDecorator(_json, error_handler, schema)
|
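A minimal usage sketch for the App class above, served with Werkzeug's development server (run_simple is standard Werkzeug API; the route itself is made up). Per App._dispatch, handlers receive the AppRequest plus the matched URL values:

from werkzeug.serving import run_simple

app = App()

@app.route('/hello/<name>')
def hello(request, name):
    # Response and json are already imported at the top of this module.
    return Response(json.dumps({'hello': name}), mimetype='application/json')

if __name__ == '__main__':
    run_simple('localhost', 8080, app)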
CLVsol/odoo_addons
|
clv_patient/seq/clv_patient_category_seq.py
|
Python
|
agpl-3.0
| 3,797
| 0.007638
|
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
def format_code(code_seq):
code = map(int, str(code_seq))
code_len = len(code)
while len(code) < 14:
code.insert(0, 0)
while len(code) < 16:
n = sum([(len(code) + 1 - i) * v for i, v in enumerate(code)]) % 11
if n > 1:
f = 11 - n
else:
f = 0
code.append(f)
code_str = "%s.%s.%s.%s.%s-%s" % (str(code[0]) + str(code[1]),
str(code[2]) + str(code[3]) + str(code[4]),
                                      str(code[5]) + str(code[6]) + str(code[7]),
                                      str(code[8]) + str(code[9]) + str(code[10]),
str(code[11]) + str(code[12]) + str(code[13]),
str(code[14]) + str(code[15]))
if code_len <= 3:
code_form = code_str[18 - code_len:21]
elif code_len > 3 and code_len <= 6:
code_form = code_str[17 - code_len:21]
elif code_len > 6 and code_len <= 9:
code_form = code_str[16 - code_len:21]
elif code_len > 9 and code_len <= 12:
code_form = code_str[15 - code_len:21]
elif code_len > 12 and code_len <= 14:
code_form = code_str[14 - code_len:21]
return code_form
class clv_patient_category(models.Model):
_inherit = 'clv_patient.category'
code = fields.Char('Category Code', size=64, select=1, required=False, readonly=False, default='/',
help='Use "/" to get an automatic new Category Code.')
@api.model
def create(self, vals):
if not 'code' in vals or ('code' in vals and vals['code'] == '/'):
code_seq = self.pool.get('ir.sequence').get(self._cr, self._uid, 'clv_patient.category.code')
vals['code'] = format_code(code_seq)
return super(clv_patient_category, self).create(vals)
@api.multi
def write(self, vals):
if 'code' in vals and vals['code'] == '/':
code_seq = self.pool.get('ir.sequence').get(self._cr, self._uid, 'clv_patient.category.code')
vals['code'] = format_code(code_seq)
return super(clv_patient_category, self).write(vals)
@api.one
def copy(self, default=None):
default = dict(default or {})
default.update({'code': '/',})
return super(clv_patient_category, self).copy(default)
|
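A hand-traced example of format_code above, worth re-verifying before relying on it (the module is Python 2 code, where map returns a list): an input of 123 is left-padded to fourteen digits, two mod-11 check digits are appended (6, then 0), the result is formatted as 00.000.000.000.123-60, and the three-digit input keeps only the trailing slice:

# Hand-traced expectation, not taken from the repository's own tests.
assert format_code(123) == '123-60'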
kmonsoor/windenergytk
|
windenergytk/gwindtk.py
|
Python
|
gpl-3.0
| 30,991
| 0.008648
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# gwindetk.py #
# #
# Part of UMass Amherst's Wind Energy Engineering Toolbox of Mini-Codes #
# (or Mini-Codes for short) #
# #
# Python code by Alec Koumjian - akoumjian@gmail.com #
# #
# This code adapted from the original Visual Basic code at                     #
# http://www.ceere.org/rerl/projects/software/mini-code-overview.html #
#                                                                              #
# These tools can be used in conjunction with the textbook #
# "Wind Energy Explained" by J.F. Manwell, J.G. McGowan and A.L. Rogers #
# http://www.ceere.org/rerl/rerl_windenergytext.html #
# #
################################################################################
# Copyright 2009 Alec Koumjian #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import wxversion
wxversion.select('2.8')
import wx
import wx.lib.intctrl as intctrl
import wxmpl
import os
import analysis
import synthesis
import file_ops
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["size"] = (800, 600)
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.active_timeseries = {}
self.__create_objects()
self.__set_properties()
self.__do_layout()
self.__set_bindings()
self.sync_active_listbox()
def __create_objects(self):
# Menu Bar
self.frame_1_menubar = wx.MenuBar()
# File menu
self.file_menu = wx.Menu()
self.import_file = wx.MenuItem(self.file_menu, -1, "Import", "Import timeseries from data file")
self.file_menu.AppendItem(self.import_file)
self.frame_1_menubar.Append(self.file_menu, "File")
# Help Menu
self.help_menu = wx.Menu()
self.help_book = wx.MenuItem(self.help_menu, -1, "Help Index", "How to use this software.")
self.about = wx.MenuItem(self.help_menu, -1, "About","About this software.")
self.help_menu.AppendItem(self.about)
self.frame_1_menubar.Append(self.help_menu, "Help")
# Set Menu Bar
self.SetMenuBar(self.frame_1_menubar)
# Menu Bar end
# Status Bar
self.frame_1_statusbar = self.CreateStatusBar(1, 0)
# Status Bar end
# Tool Bar
self.frame_1_toolbar = wx.ToolBar(self, -1, style=wx.TB_HORIZONTAL|wx.TB_3DBUTTONS)
self.SetToolBar(self.frame_1_toolbar)
self.frame_1_toolbar.AddLabelTool(wx.NewId(), "new", wx.Bitmap("stock_new.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
self.frame_1_toolbar.AddLabelTool(wx.NewId(), "open", wx.Bitmap("stock_open.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
self.frame_1_toolbar.AddLabelTool(wx.NewId(), "save", wx.Bitmap("stock_save.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
self.frame_1_toolbar.AddLabelTool(wx.NewId(), "exit", wx.Bitmap("stock_exit.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
# Tool Bar end
# Top level sizers
self.sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
self.sizer_3 = wx.BoxSizer(wx.VERTICAL)
# Splitter
self.splitter = wx.SplitterWindow(self, -1, style=wx.SP_3DSASH|wx.SP_3DBORDER)
# TS panel widgets
self.sizer_ts = wx.BoxSizer(wx.VERTICAL)
self.sizer_control_ts = wx.BoxSizer(wx.HORIZONTAL)
self.ts_control_panel = wx.Panel(self.splitter, -1)
self.list_box_1 = wx.ListBox(self.ts_control_panel, -1, choices=[], style=wx.LB_MULTIPLE|wx.LB_NEEDED_SB)
self.ts_plot_button = wx.Button(self.ts_control_panel, -1, 'Plot Timeseries')
self.ts_remove_button = wx.Button(self.ts_control_panel, -1, 'Remove')
# Notebook
self.notebook_1 = wx.Notebook(self.splitter, -1, style=wx.NB_LEFT)
self.notebook_1_pane_1 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_2 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_3 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_4 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_5 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_6 = wx.Panel(self.notebook_1, -1)
# Text results panel
self.results_panel = wx.Panel(self, -1, style=wx.SIMPLE_BORDER)
self.results_panel_text = wx.StaticText(self.results_panel, -1, label="Numerical Results Here")
# Graphing panel
self.plot_panel = wxmpl.PlotPanel(self, -1)
# Analysis widgets
self.analysis_sizer = wx.BoxSizer(wx.VERTICAL)
self.stat_button = wx.Button(self.notebook_1_pane_1, -1, 'Statistics')
self.hist_button = wx.Button(self.notebook_1_pane_1, -1, 'Histogram')
self.weibull_button = wx.Button(self.notebook_1_pane_1, -1, 'Weibull Params')
self.corr_button = wx.Button(self.notebook_1_pane_1, -1, 'Correlate')
self.corr_panel = wx.Panel(self.notebook_1_pane_1, -1, style=wx.RAISED_BORDER)
self.corr_panel_btn = wx.Button(self.notebook_1_pane_1, -1, '>>', name='corr')
self.corr_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.rad_autocorr = wx.RadioButton(self.corr_panel, -1, 'Auto', style=wx.RB_GROUP)
self.rad_crosscorr = wx.RadioButton(self.corr_panel, -1, 'Cross')
self.corr_lag_int = intctrl.IntCtrl(self.corr_panel, -1, value=15, min=0)
self.lag_label = wx.StaticText(self.corr_panel, -1, 'No. lags')
self.block_button = wx.Button(self.notebook_1_pane_1, -1, 'Block Average')
self.block_panel = wx.Panel(self.notebook_1_pane_1, -1, style=wx.RAISED_BORDER)
self.block_panel_btn = wx.Button(self.notebook_1_pane_1, -1, '>>', name='block')
self.block_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.block_new_freq = wx.Choice(self.block_panel, -1, choices=['YEARLY','QUARTERLY','MONTHLY','WEEKLY','DAILY','HOURLY','MINUTELY'])
self.psd_button = wx.Button(self.notebook_1_pane_1, -1, 'Power Spectral Density')
# End Analysis widgets
# Synthesis widgets
self.synthesis_sizer = wx.BoxSizer(wx.VERTICAL)
self.arma_button = wx.Button(self.notebook_1_pane_2, -1, 'ARMA')
self.arma_panel = wx.Panel(self.notebook_1_pane_2, -1, style=wx.RAISED_BORDER)
        self.arma_panel_btn = wx.Button(self.notebook_1_pane_2, -1, '>>', name='arma')
|
UManPychron/pychron
|
pychron/lasers/tasks/panes/ablation.py
|
Python
|
apache-2.0
| 4,882
| 0.004506
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traitsui.api import View, Item, VGroup, InstanceEditor, UItem, EnumEditor, \
RangeEditor, spring, HGroup, Group, ButtonEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.core.ui.led_editor import LEDEditor
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.lasers.tasks.laser_panes import ClientPane
class AblationCO2ClientPane(ClientPane):
def trait_context(self):
ctx = super(AblationCO2ClientPane, self).trait_context()
ctx['tray_calibration'] = self.model.stage_manager.tray_calibration_manager
        ctx['stage_manager'] = self.model.stage_manager
return ctx
def traits_view(self):
pos_grp = VGroup(UItem('move_enabled_button',
editor=ButtonEditor(label_value='move_enabled_label')),
VGroup(HGroup(Item('position'),
UItem('stage_manager.stage_map_name',
editor=EnumEditor(name='stage_manager.stage_map_names')),
UItem('stage_stop_button')),
Item('x', editor=RangeEditor(low_name='stage_manager.xmin',
high_name='stage_manager.xmax')),
Item('y', editor=RangeEditor(low_name='stage_manager.ymin',
high_name='stage_manager.ymax')),
Item('z', editor=RangeEditor(low_name='stage_manager.zmin',
high_name='stage_manager.zmax')),
enabled_when='_move_enabled'),
label='Positioning')
calibration_grp = VGroup(UItem('tray_calibration.style',
enabled_when='not tray_calibration.isCalibrating()'),
UItem('tray_calibration.calibrate',
editor=ButtonEditor(label_value='tray_calibration.calibration_step')),
HGroup(Item('tray_calibration.cx', format_str='%0.3f', style='readonly'),
Item('tray_calibration.cy', format_str='%0.3f', style='readonly')),
Item('tray_calibration.rotation', format_str='%0.3f', style='readonly'),
Item('tray_calibration.scale', format_str='%0.4f', style='readonly'),
Item('tray_calibration.error', format_str='%0.2f', style='readonly'),
UItem('tray_calibration.calibrator', style='custom', editor=InstanceEditor()),
CustomLabel('tray_calibration.calibration_help',
color='green',
height=75, width=300),
label='Tray Calibration')
tgrp = Group(pos_grp, calibration_grp, layout='tabbed')
egrp = HGroup(UItem('enabled', editor=LEDEditor(colors=['red', 'green'])),
UItem('enable', editor=ButtonEditor(label_value='enable_label')),
UItem('fire_laser_button', editor=ButtonEditor(label_value='fire_label'),
enabled_when='enabled'),
Item('output_power', label='Power'),
UItem('units'),
spring,
icon_button_editor('snapshot_button', 'camera'),
icon_button_editor('test_connection_button',
'connect', tooltip='Test Connection'))
v = View(VGroup(egrp, tgrp))
return v
# ============= EOF =============================================
|
ahmadiga/min_edx
|
lms/djangoapps/oauth2_handler/tests.py
|
Python
|
agpl-3.0
| 9,001
| 0.001333
|
# pylint: disable=missing-docstring
from django.core.cache import cache
from django.test.utils import override_settings
from lang_pref import LANGUAGE_KEY
from xmodule.modulestore.tests.factories import (check_mongo_calls, CourseFactory)
from student.models import anonymous_id_for_user
from student.models import UserProfile
from student.roles import (CourseInstructorRole, CourseStaffRole, GlobalStaff,
OrgInstructorRole, OrgStaffRole)
from student.tests.factories import UserFactory, UserProfileFactory
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
# Will also run default tests for IDTokens and UserInfo
from oauth2_provider.tests import IDTokenTestCase, UserInfoTestCase
class BaseTestMixin(ModuleStoreTestCase):
profile = None
def setUp(self):
super(BaseTestMixin, self).setUp()
self.course_key = CourseFactory.create(emit_signals=True).id
self.course_id = unicode(self.course_key)
self.user_factory = UserFactory
self.set_user(self.make_user())
def set_user(self, user):
super(BaseTestMixin, self).set_user(user)
        self.profile = UserProfileFactory(user=self.user)
class IDTokenTest(BaseTestMixin, IDTokenTestCase):
def setUp(self):
super(IDTokenTest, self).setUp()
# CourseAccessHandler uses the application cache.
cache.clear()
def test_sub_claim(self):
scopes, claims = self.get_id_token_values('openid')
self.assertIn('openid', scopes)
sub = claims['sub']
expected_sub = anonymous_id_for_user(self.user, None)
self.assertEqual(sub, expected_sub)
def test_user_name_claim(self):
_scopes, claims = self.get_id_token_values('openid profile')
claim_name = claims['name']
user_profile = UserProfile.objects.get(user=self.user)
user_name = user_profile.name
self.assertEqual(claim_name, user_name)
@override_settings(LANGUAGE_CODE='en')
def test_user_without_locale_claim(self):
scopes, claims = self.get_id_token_values('openid profile')
self.assertIn('profile', scopes)
self.assertEqual(claims['locale'], 'en')
def test_user_with_locale_claim(self):
language = 'en'
set_user_preference(self.user, LANGUAGE_KEY, language)
scopes, claims = self.get_id_token_values('openid profile')
self.assertIn('profile', scopes)
locale = claims['locale']
self.assertEqual(language, locale)
def test_no_special_course_access(self):
with check_mongo_calls(0):
scopes, claims = self.get_id_token_values('openid course_instructor course_staff')
self.assertNotIn('course_staff', scopes)
self.assertNotIn('staff_courses', claims)
self.assertNotIn('course_instructor', scopes)
self.assertNotIn('instructor_courses', claims)
def test_course_staff_courses(self):
CourseStaffRole(self.course_key).add_users(self.user)
with check_mongo_calls(0):
scopes, claims = self.get_id_token_values('openid course_staff')
self.assertIn('course_staff', scopes)
self.assertNotIn('staff_courses', claims) # should not return courses in id_token
def test_course_instructor_courses(self):
with check_mongo_calls(0):
CourseInstructorRole(self.course_key).add_users(self.user)
scopes, claims = self.get_id_token_values('openid course_instructor')
self.assertIn('course_instructor', scopes)
self.assertNotIn('instructor_courses', claims) # should not return courses in id_token
def test_course_staff_courses_with_claims(self):
CourseStaffRole(self.course_key).add_users(self.user)
course_id = unicode(self.course_key)
nonexistent_course_id = 'some/other/course'
claims = {
'staff_courses': {
'values': [course_id, nonexistent_course_id],
'essential': True,
}
}
with check_mongo_calls(0):
scopes, claims = self.get_id_token_values(scope='openid course_staff', claims=claims)
self.assertIn('course_staff', scopes)
self.assertIn('staff_courses', claims)
self.assertEqual(len(claims['staff_courses']), 1)
self.assertIn(course_id, claims['staff_courses'])
self.assertNotIn(nonexistent_course_id, claims['staff_courses'])
def test_permissions_scope(self):
scopes, claims = self.get_id_token_values('openid profile permissions')
self.assertIn('permissions', scopes)
self.assertFalse(claims['administrator'])
self.user.is_staff = True
self.user.save()
_scopes, claims = self.get_id_token_values('openid profile permissions')
self.assertTrue(claims['administrator'])
class UserInfoTest(BaseTestMixin, UserInfoTestCase):
def setUp(self):
super(UserInfoTest, self).setUp()
# create another course in the DB that only global staff have access to
CourseFactory.create(emit_signals=True)
def token_for_scope(self, scope):
full_scope = 'openid %s' % scope
self.set_access_token_scope(full_scope)
token = self.access_token.token # pylint: disable=no-member
return full_scope, token
def get_with_scope(self, scope):
scope, token = self.token_for_scope(scope)
result, claims = self.get_userinfo(token, scope)
self.assertEqual(result.status_code, 200)
return claims
def get_with_claim_value(self, scope, claim, values):
_full_scope, token = self.token_for_scope(scope)
result, claims = self.get_userinfo(
token,
claims={claim: {'values': values}}
)
self.assertEqual(result.status_code, 200)
return claims
def _assert_role_using_scope(self, scope, claim, assert_one_course=True):
with check_mongo_calls(0):
claims = self.get_with_scope(scope)
self.assertEqual(len(claims), 2)
courses = claims[claim]
self.assertIn(self.course_id, courses)
if assert_one_course:
self.assertEqual(len(courses), 1)
def test_request_global_staff_courses_using_scope(self):
GlobalStaff().add_users(self.user)
self._assert_role_using_scope('course_staff', 'staff_courses', assert_one_course=False)
def test_request_org_staff_courses_using_scope(self):
OrgStaffRole(self.course_key.org).add_users(self.user)
self._assert_role_using_scope('course_staff', 'staff_courses')
def test_request_org_instructor_courses_using_scope(self):
OrgInstructorRole(self.course_key.org).add_users(self.user)
self._assert_role_using_scope('course_instructor', 'instructor_courses')
def test_request_staff_courses_using_scope(self):
CourseStaffRole(self.course_key).add_users(self.user)
self._assert_role_using_scope('course_staff', 'staff_courses')
def test_request_instructor_courses_using_scope(self):
CourseInstructorRole(self.course_key).add_users(self.user)
self._assert_role_using_scope('course_instructor', 'instructor_courses')
def _assert_role_using_claim(self, scope, claim):
values = [self.course_id, 'some_invalid_course']
with check_mongo_calls(0):
claims = self.get_with_claim_value(scope, claim, values)
self.assertEqual(len(claims), 2)
courses = claims[claim]
self.assertIn(self.course_id, courses)
self.assertEqual(len(courses), 1)
def test_request_global_staff_courses_with_claims(self):
GlobalStaff().add_users(self.user)
self._assert_role_using_claim('course_staff', 'staff_courses')
def test_request_org_staff_courses_with_claims(self):
OrgStaffRole(self.course_key.org).add_users(self.user)
self._assert_role_using_claim('course_staff', 'staff_courses')
def test_request_org_instructor_courses_with_claims(self):
        OrgInstructorRole(self.course_key.org).add_users(self.user)
        self._assert_role_using_claim('course_instructor', 'instructor_courses')
|
ImTheTom/discordBot
|
cogs/search.py
|
Python
|
mit
| 12,276
| 0.008635
|
#import list
import asyncio
import discord
from discord.ext import commands
import importlib.machinery
import datetime
import requests
import json
from bs4 import BeautifulSoup as BS
from requests import get as re_get
from random import *
from re import findall, match, search
TooBig = ["You know you make a cool bot and all people want to do is kill it :(", "Dude that is wayyyyy too much for me to handle",
"Nobody thinks you are cool", "fark", "yes", "No", "I don't like people like you",
"Ehh", "stop it you are hurting me" , "hackerman", "Dude stop it.", "I can't believe you requested that", "meh","maybe",
"How about you do that one hey", "Hang on", "nahh maybe I don't", "you can do that one", "You take over now", "you can handle the requests from now on, I'm done"]
def under(content):
if(len(content)<3000):
return True
return False
#Strips the message to find what the actual content that needs to be searched
def youtube(msg):
url = "https://www.youtube.com/results?search_query="+msg
bs = BS(re_get(url).text, "html.parser")
items = bs.find("div", id="results").find_all("div", class_="yt-lockup-content")
i= 0
found = False
while not found and i < 20:
href = items[i].find("a", class_="yt-uix-sessionlink")["href"]
if href.startswith("/watch"):
found = True
return href
if not found:
i+=1
return href
def youtubeDetails(msg):
url = msg
bs = BS(re_get(url).text, "html.parser")
title = bs.find(class_="watch-title")
title = title.contents[0]
title = title[5:-3]
views = bs.find(class_="watch-view-count")
views = views.contents[0]
views = views[:-6]
like = bs.find_all(class_="yt-uix-button-content")
likes = like[19]
likes = likes.contents[0]
dislikes = like[20]
dislikes = dislikes.contents[0]
date = bs.find(class_="watch-time-text")
date = date.contents[0]
date = date[13:]
subCount = bs.find(class_="yt-subscription-button-subscriber-count-branded-horizontal yt-subscriber-count")
subCount = subCount.contents[0]
uploader = bs.find(class_="yt-user-info")
uploader = uploader.find('a').contents[0]
string = "Name: "+title+".\n\nViews: "+views+". Uploaded: "+date+"\n\nUploader: "+uploader+". Subscribers: "+subCount+".\n\nLikes: "+likes+". Dislikes: "+dislikes+".\n\nURL: "+url
return string
def googleSearch(url):
try:
bs = BS(re_get(url).text, "html.parser")
links = bs.find(class_="r")
links = links.find('a', href=True)
first = links.contents
i =0
length = len(first)
string = ""
while(i<length):
string = string +str(first[i])
i+=1
string = stripOfSpecialCharacters(string)
link = links['href']
link = link[7:]
indexofAnd = link.index('&')
link = link [:indexofAnd]
results = bs.find(id="resultStats")
results = results.contents[0]
results = results[:-8]
description = bs.find(class_="st")
        description = description.contents
i=0
length=len(description)
d=""
while(i<length):
d = d +str(description[i])
i+=1
d = stripOfSpecialCharacters(d)
message= "Search URL: "+url+"\n\nResults: "+resul
|
ts+".\n\nFirst URL: "+link+"\n\nTitle: "+string+".\n\nDescription: "+d
return message
except:
message = "Something went wrong here. Either I fucked up or you fucked up. My money is on you that fucked it up."
return message
def urbanSearch(url):
try:
bs = BS(re_get(url).text, "html.parser")
links = bs.find(class_="def-header")
links = links.find('a', href=True)
links = links.contents[0]
for a in bs.findAll('a'):
del a['href']
meaning = bs.find(class_="meaning")
meaning = meaning.contents
string = ""
i=0
length = len(meaning)
while(i<length):
string = string+str(meaning[i])
i+=1
meaning = stripOfSpecialCharacters(string)
example = bs.find(class_="example")
example = example.contents
string = ""
i=0
length = len(example)
while(i<length):
string = string+str(example[i])
i+=1
example = stripOfSpecialCharacters(string)
contribuation = bs.find(class_="contributor")
contribuation = contribuation.contents
author = contribuation[1]
author = stripOfSpecialCharacters(str(author))
date= contribuation[2]
date = str(date)
date = date[1:]
tags = bs.find(class_="tags")
tags = tags.contents
length = len(tags)
i=0
string =""
while(i<length):
string = string+str(tags[i])+' '
i+=1
tags = stripOfSpecialCharacters(string)
counts = bs.find_all(class_="count")
likes = counts[0]
dislikes = counts[1]
likes = likes.contents[0]
dislikes = dislikes.contents[0]
string = links+"\n\nMeaning: \n" + meaning+"\n\nExamples: \n"+example+"\n\nTags: "+tags+"\n\nAuthor: "+author+".\nDate: "+date+".\nLikes: "+str(likes)+ ". Dislikes: "+str(dislikes)+".\n\nURL: "+url
return string
except:
string = "Something went wrong here. Either I fucked up or you fucked up. My money is on you that fucked it up."
return string
def opSearch(url):
try:
bs = BS(re_get(url).text, "html.parser")
name = bs.find(class_="Name")
name = name.contents[0]
try:
rank = bs.find(class_="TierRank")
rank = rank.contents
rank = rank[1]
rankSolo = rank.contents[0]
rank = bs.find_all(class_="TierRank")
rank =rank[1]
flex = rank.contents
flex = flex[1]
flex = flex.contents[0]
except:
rank ="unranked"
flex = "unranked"
try:
wins = bs.find(class_="win")
wins = wins.contents[0]
loss = bs.find(class_="lose")
loss = loss.contents[0]
except:
wins ="little or no games played"
loss = "little or no games played"
try:
champ = bs.find_all(class_="ChampionName")
first = champ[0]
first = first.contents[1]
first = str(first)
first = stripOfSpecialCharacters(first)
first=stripSpaces(first)
first = first.strip()
except:
first =""
try:
champ = bs.find_all(class_="ChampionName")
second = champ[1]
second = second.contents[1]
second = str(second)
second = stripOfSpecialCharacters(second)
second=stripSpaces(second)
second = second.strip()
except:
second =""
try:
champ = bs.find_all(class_="ChampionName")
third = champ[2]
third = third.contents[1]
third = str(third)
third = stripOfSpecialCharacters(third)
third=stripSpaces(third)
third = third.strip()
except:
third = ""
string = name+"\n\nRank Solo: "+rankSolo+". Rank Flex: "+flex+".\nWins: "+wins+". Losses: "+loss+".\n\nChampions: "+first+", "+second+", "+third+".\n\nURL: "+url
return string
except:
string = "Something went wrong here. Either I fucked up or you fucked up. My money is on you that fucked it up."
return string
def stripSpaces(string):
string = string.replace(' ', '')
string = string.replace(' ', '')
return string
def stripOfSpecialCharacters(string):
string = string.replace('<b>', '')
string = string.replace('</b>', '')
string = string.replace('<i>', '')
string = string.replace('</i>', '')
string = string.replace('<a>', '')
string = string.replace('</a>', '')
string = string.replace('<br/>', '')
    string = string.replace('<br>', '')
    return string
|
Awingu/open-ovf
|
py/tests/OvfEnvironmentTestCase.py
|
Python
|
epl-1.0
| 22,345
| 0.002775
|
#!/usr/bin/python
# vi: ts=4 expandtab syntax=python
##############################################################################
# Copyright (c) 2008 IBM Corporation
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Sharad Mishra (IBM) - initial implementation
##############################################################################
"""
pyUnit tests
"""
import os, unittest
from xml.dom.minidom import parse
from ovf.env import EnvironmentSection
from ovf.env import PlatformSection
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files/")
SCHEMA_FILES_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "..")
OVF_ENV_XSD = os.path.join(os.path.dirname(__file__), "..", "..",
"schemas", "ovf-environment.xsd")
def isSamePath(a, b):
""" test if two paths are same. """
return ( os.path.abspath(a) == os.path.abspath(b) )
class OvfEnvironmentTestCase (unittest.TestCase):
""" Test EnvironmentSection """
path = TEST_FILES_DIR
fileName = path + 'test-environment.xml'
document = parse(fileName)
ovfEnv = None
ovfEnv2 = None
envID = "22"
newID = "1234"
entID = "2"
propdata = {'fillerkey':'fillervalue', 'propkey':'propvalue' }
platdata = PlatformSection.PlatformSection({'Kind': "ESX Server",
'Vendor': "VMware, Inc.",
'Version': "3.0.1",
'Locale': "en_US"})
def setUp(self):
""" Setup the test."""
self.ovfEnv = EnvironmentSection.EnvironmentSection(self.fileName)
self.ovfEnv2 = EnvironmentSection.EnvironmentSection()
def test_NewObj(self):
""" Verify that a new EnvironmentSection object is created."""
assert isSamePath(self.ovfEnv.path, self.fileName),"object not created"
def testInitNoInputFile(self):
""" Tests EnvironmentSection's __init__ method."""
assert self.ovfEnv2.id == None, "object id should be None."
assert self.ovfEnv2.document == None, "document should be None."
assert self.ovfEnv2.environment == None, \
"environment element should be None."
assert self.ovfEnv2.oldChild == None, "oldChild should be None."
assert self.ovfEnv2.path == None, "path should be None."
def testCreateHeaderNew(self):
""" Tests the createHeader method. No file is parsed."""
self.assertEquals(self.ovfEnv2.environment, None)
self.ovfEnv2.createHeader(None)
self.assertNotEqual(self.ovfEnv2.environment, None)
self.ovfEnv2.generateXML("out.xml")
rc = EnvironmentSection.validateXML("out.xml", OVF_ENV_XSD)
self.assertEquals(rc, 0)
def testCreateHeaderExisting(self):
""" Test createHeader when an existing xml file is passed as input."""
self.ovfEnv.createHeader(self.envID)
self.assertNotEqual(self.ovfEnv.environment, None)
self.assertEqual\
(self.ovfEnv.environment.attributes['ovfenv:id'].value, self.envID)
self.ovfEnv.generateXML("out.xml")
rc = EnvironmentSection.validateXML("out.xml", OVF_ENV_XSD)
self.assertEquals(rc, 0)
def testCreateSection(self):
""" Test createSection with no existing ovf-env.xml file."""
self.ovfEnv2.createHeader()
        # Verify that ValueError is raised when creating section without
# an ID and SectionName.
self.assertRaises(ValueError, self.ovfEnv2.createSection, None, None)
# Verify that Entity is not created when no ID is given.
self.assertRaises\
(ValueError, self.ovfEnv2.createSection, None, "Entity")
# Verify that a new Entity is created.
self.ovfEnv2.createSection (self.envID, "Entity")
self.assertNotEquals\
(self.ovfEnv2.document.getElementsByTagName('Entity'), [])
self.assertEquals\
(self.ovfEnv2.document.getElementsByTagName('PropertySection'), [])
# Verify that a Platform section is created.
self.ovfEnv2.createSection(self.envID, "PlatformSection", self.platdata)
element = self.ovfEnv2.document.getElementsByTagName("Version")
self.assertEquals(element[0].firstChild.data, "3.0.1")
self.ovfEnv2.generateXML("out.xml")
rc = EnvironmentSection.validateXML("out.xml", OVF_ENV_XSD)
self.assertEquals(rc, 0)
def testCreateSectionWithID(self):
""" Test createSection with an existing ovf-env.xml file. """
# Verify that ValueError is raised when new Platform or Property
# section is created without Entity.
self.assertRaises\
(ValueError, self.ovfEnv.createSection, self.newID, "PlatformSection")
self.assertRaises\
(ValueError, self.ovfEnv.createSection, self.newID, "PropertySection")
# Verify that envID is updated for the header section.
self.ovfEnv.createHeader(self.envID)
self.assertEqual\
(self.ovfEnv.environment.attributes['ovfenv:id'].value, self.envID)
# Verify that a second PlatformSection is added to header.
self.ovfEnv.createSection(self.envID, "PlatformSection", self.platdata)
element = self.ovfEnv.document.getElementsByTagName("PlatformSection")
self.assertEquals(element.length, 3)
# Verify that another PropertySection is added to the xml.
self.ovfEnv.createSection(self.envID, "PropertySection", self.propdata)
element = self.ovfEnv.findElementById(self.envID)
section = element.getElementsByTagName("PropertySection")
self.assertEquals(section.length, 3)
# Verify that a new PlatformSection is added to an
        # existing Entity.
self.ovfEnv.createSection(self.entID, "PlatformSection", self.platdata)
section = self.ovfEnv.findElementById(self.entID)
element = section.getElementsByTagName("Version")
self.assertEquals(element.length, 1)
# Verify that a new PropertySection is added to an
# existing Entity.
self.ovfEnv.createSection(self.entID, "PropertySection", self.propdata)
element = self.ovfEnv.findElementById(self.entID)
section = element.getElementsByTagName("PropertySection")
self.assertEquals(section.length, 2)
# Verify that a new Platform/Property section cannot be
# created with a new ID. An Entity by that ID should exist.
self.assertRaises(ValueError, self.ovfEnv.createSection, self.newID,
"PlatformSection", self.platdata)
self.assertRaises(ValueError, self.ovfEnv.createSection, self.newID,
"PropertySection", self.propdata)
# Verify that a new platform section is created with given data.
self.ovfEnv.createSection(self.newID, "Entity")
self.ovfEnv.createSection(self.newID, "PlatformSection",
PlatformSection.PlatformSection(
{'Kind': "ESX Server",
'Version': "3.0.1",
'Vendor': "VMware, Inc.",
'Locale': "en_US"}))
section = self.ovfEnv.findElementById(self.newID)
element = section.getElementsByTagName("Version")
self.assertEquals(element[0].firstChild.data, "3.0.1")
# Verify that a new property section is created with given data.
self.ovfEnv.createSection(self.newID, "PropertySection", self.propdata)
element = self.ovfEnv.findElementById(self.newID)
section = element.getElementsByTagName("PropertySection")
props = section[0].getElementsByTagName("Property")
for i in range(0, props.length):
if props[i].getAttribute('ovfenv:key') == "propkey":
self.assertEquals(props[i].getAttribute('ovfenv:value'),
"propvalue")
#
|
gratipay/gratipay.com
|
gratipay/utils/icons.py
|
Python
|
mit
| 341
| 0.046921
|
STATUS_ICONS = { "success": ""
, "warning": ""
, "failure": ""
, "feature": ""
}
REVIEW_MAP = { 'approved': 'success'
, 'unreviewed': 'warning'
, 'rejected': 'failure'
, 'featured': 'feature'
}
|
crashtack/django-imager
|
imager_profile/apps.py
|
Python
|
mit
| 260
| 0
|
from django.apps import AppConfig
class ImagerProfileAppConfig(AppConfig):
name = "imager_profile"
verbose_name = "Imager User Profile"
def ready(self):
"""code to run when t
|
he app is ready"""
from imager_profile import handlers
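        # Hedged note: the import itself is what wires things up -- a common
        # AppConfig.ready() idiom where importing the handlers module
        # registers its signal receivers as a side effect.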
|
scheib/chromium
|
third_party/blink/web_tests/external/wpt/webdriver/tests/perform_actions/support/mouse.py
|
Python
|
bsd-3-clause
| 927
| 0.004315
|
def get_viewport_rect(session):
return session.execute_script("""
return {
height: window.innerHeight || document.documentElement.clientHeight,
width: window.innerWidth || document.documentElement.clientWidth,
};
""")
def get_inview_center(elem_rect, viewport_rect):
x = {
"left": max(0, min(elem_rect["x"], elem_rect["x"] + elem_rect["width"])),
"right": min(viewport_rect["width"], max(elem_rect["x"],
elem_rect["x"] + elem_rect["width"])),
}
y = {
"top": max(0, min(elem_rect["y"], elem_rect["y"] + elem_rect["height"])),
"bottom": min(viewport_rect["height"], max(elem_rect["y"],
elem_rect["y"] + elem_rect["he
|
ight"])),
}
return {
"x": (x["left"] + x["right"]) / 2,
"y": (y["top"] + y["bottom"]) / 2,
}
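# Worked example (hedged; the rect and viewport values are illustrative, not
# part of the original file): for a 1024x768 viewport and an element at
# (100, 200) sized 50x40, nothing is clipped, so the in-view centre is just
# the element midpoint:
#   get_inview_center({"x": 100, "y": 200, "width": 50, "height": 40},
#                     {"width": 1024, "height": 768})
#   # -> {"x": 125.0, "y": 220.0}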
|
neuropoly/spinalcordtoolbox
|
spinalcordtoolbox/scripts/sct_compute_mtr.py
|
Python
|
mit
| 2,861
| 0.003146
|
#!/usr/bin/env python
#########################################################################################
#
# Compute magnetization transfer ratio (MTR).
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad
# Modified: 2014-09-21
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
import os
from spinalcordtoolbox.utils import SCTArgumentParser, Metavar, init_sct, display_viewer_syntax, printv, set_loglevel
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.qmri.mt import compute_mtr
def get_parser():
parser = SCTArgumentParser(
description='Compute magnetization transfer ratio (MTR). Output is given in percentage.'
)
mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatoryArguments.add_argument(
'-mt0',
required=True,
help='Image without MT pulse (MT0)',
metavar=Metavar.float,
)
mandatoryArguments.add_argument(
'-mt1',
required=True,
help='Image with MT pulse (MT1)',
metavar=Metavar.float,
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-thr",
type=float,
help="Threshold to clip MTR output values in case of division by small number. This implies that the output image"
"range will be [-thr, +thr]. Default: 100.",
default=100
)
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit"
)
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
)
optional.add_argument(
'-o',
help='Path to output file.',
metavar=Metavar.str,
default=os.path.join('.', 'mtr.nii.gz')
)
return parser
def main(argv=None):
    parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
fname_mtr = arguments.o
# compute MTR
printv('\nCompute MTR...', verbose)
nii_mtr = compute_mtr(nii_mt1=Image(arguments.mt1), nii_mt0=Image(arguments.mt0), threshold_mtr=arguments.thr)
# save MTR file
nii_mtr.save(fname_mtr, dtype='float32')
display_viewer_syntax([arguments.mt0, arguments.mt1, fname_mtr])
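# Reference formula (standard definition, hedged -- the actual arithmetic is
# implemented in spinalcordtoolbox.qmri.mt.compute_mtr):
#   MTR [%] = 100 * (MT0 - MT1) / MT0
# The -thr option clips the result to [-thr, +thr] to guard against division
# by near-zero MT0 voxels.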
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
|
Discountrobot/Headless
|
headless/checkLogins.py
|
Python
|
mit
| 715
| 0.020979
|
import json
import urllib
import urllib2
import sys
for login in json.load(open(sys.argv[1])):
if login['EmailAddress']:
try:
# encode the json
data = urllib.urlencode(login)
# make the POST request
#
response = urllib2.urlopen('https://login.eovendo.com', data, 10)
# encode the repsonse in json.
jr = json.loads(response.read())
# if we get a returnUrl the account is valid
if jr['returnUrl']:
print login['EmailAddress'] + " is valid.."
else:
                print login['EmailAddress'].encode('utf-8') + ' failed with msg: ' + jr['Message'].encode('utf-8')
except Exception, e:
            print e
else:
print str(login) + " isn't valid"
|
liqd/a4-meinberlin
|
meinberlin/apps/organisations/migrations/0007_remove_organisation_type.py
|
Python
|
agpl-3.0
| 420
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-04 16:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('meinberlin_organisations', '0006_update_orga_type_string'),
]
operations = [
migrations.RemoveField(
model_name='organisation',
name='type',
),
]
|
johnnyliu27/openmc
|
openmc/lattice.py
|
Python
|
mit
| 53,509
| 0.000523
|
from abc import ABCMeta
from collections import OrderedDict
from collections.abc import Iterable
from copy import deepcopy
from math import sqrt, floor
from numbers import Real, Integral
from xml.etree import ElementTree as ET
import numpy as np
import openmc.checkvalue as cv
import openmc
from openmc._xml import get_text
from openmc.mixin import IDManagerMixin
class Lattice(IDManagerMixin, metaclass=ABCMeta):
"""A repeating structure wherein each element is a universe.
Parameters
----------
lattice_id : int, optional
Unique identifier for the lattice. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the lattice. If not specified, the name is the empty string.
Attributes
----------
id : int
Unique identifier for the lattice
name : str
Name of the lattice
pitch : Iterable of float
Pitch of the lattice in each direction in cm
outer : openmc.Universe
A universe to fill all space outside the lattice
universes : Iterable of Iterable of openmc.Universe
A two- or three-dimensional list/array of universes filling each element
of the lattice
"""
next_id = 1
used_ids = openmc.Universe.used_ids
def __init__(self, lattice_id=None, name=''):
# Initialize Lattice class attributes
self.id = lattice_id
self.name = name
self._pitch = None
self._outer = None
self._universes = None
@property
def name(self):
return self._name
@property
def pitch(self):
return self._pitch
@property
def outer(self):
return self._outer
@property
def universes(self):
return self._universes
@name.setter
def name(self, name):
if name is not None:
cv.check_type('lattice name', name, str)
self._name = name
else:
self._name = ''
@outer.setter
def outer(self, outer):
cv.check_type('outer universe', outer, openmc.Universe)
self._outer = outer
@staticmethod
def from_hdf5(group, universes):
"""Create lattice from HDF5 group
Parameters
----------
group : h5py.Group
Group in HDF5 file
universes : dict
Dictionary mapping universe IDs to instances of
:class:`openmc.Universe`.
Returns
-------
openmc.Lattice
Instance of lattice subclass
"""
lattice_id = int(group.name.split('/')[-1].lstrip('lattice '))
name = group['name'].value.decode() if 'name' in group else ''
lattice_type = group['type'].value.decode()
if lattice_type == 'rectangular':
dimension = group['dimension'][...]
lower_left = group['lower_left'][...]
pitch = group['pitch'][...]
outer = group['outer'].value
universe_ids = group['universes'][...]
# Create the Lattice
lattice = openmc.RectLattice(lattice_id, name)
lattice.lower_left = lower_left
lattice.pitch = pitch
# If the Universe specified outer the Lattice is not void
if outer >= 0:
lattice.outer = universes[outer]
# Build array of Universe pointers for the Lattice
uarray = np.empty(universe_ids.shape, dtype=openmc.Universe)
for z in range(universe_ids.shape[0]):
for y in range(universe_ids.shape[1]):
for x in range(universe_ids.shape[2]):
uarray[z, y, x] = universes[universe_ids[z, y, x]]
# Use 2D NumPy array to store lattice universes for 2D lattices
if len(dimension) == 2:
uarray = np.squeeze(uarray)
uarray = np.atleast_2d(uarray)
# Set the universes for the lattice
lattice.universes = uarray
elif lattice_type == 'hexagonal':
n_rings = group['n_rings'].value
n_axial = group['n_axial'].value
center = group['center'][...]
pitch = group['pitch'][...]
outer = group['outer'].value
universe_ids = group['universes'][...]
# Create the Lattice
lattice = openmc.HexLattice(lattice_id, name)
lattice.center = center
lattice.pitch = pitch
# If the Universe specified outer the Lattice is not void
if outer >= 0:
lattice.outer = universes[outer]
# Build array of Universe pointers for the Lattice. Note that
# we need to convert between the HDF5's square array of
# (x, alpha, z) to the Python API's format of a ragged nested
# list of (z, ring, theta).
uarray = []
for z in range(n_axial):
# Add a list for this axial level.
uarray.append([])
x = n_rings - 1
a = 2*n_rings - 2
for r in range(n_rings - 1, 0, -1):
# Add a list for this ring.
uarray[-1].append([])
# Climb down the top-right.
for i in range(r):
uarray[-1][-1].append(universe_ids[z, a, x])
x += 1
a -= 1
# Climb down the right.
for i in range(r):
uarray[-1][-1].append(universe_ids[z, a, x])
a -= 1
# Climb down the bottom-right.
for i in range(r):
uarray[-1][-1].append(universe_ids[z, a, x])
x -= 1
# Climb up the bottom-left.
for i in range(r):
uarray[-1][-1].append(universe_ids[z, a, x])
x -= 1
a += 1
# Climb up the left.
for i in range(r):
uarray[-1][-1].append(universe_ids[z, a, x])
a += 1
# Climb up the top-left.
for i in range(r):
uarray[-1][-1].append(universe_ids[z, a, x])
x += 1
# Move down to the next ring.
a -= 1
# Convert the ids into Universe objects.
uarray[-1][-1] = [universes[u_id]
for u_id in uarray[-1][-1]]
# Handle the degenerate center ring separately.
u_id = universe_ids[z, a, x]
uarray[-1].append([universes[u_id]])
# Add the universes to the lattice.
if len(pitch) == 2:
# Lattice is 3D
lattice.universes = uarray
else:
# Lattice is 2D; extract the only axial level
lattice.universes = uarray[0]
return lattice
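    # Sanity note (hedged, not from the original source): in the hexagonal
    # branch above, ring r (counting outward, r = n_rings-1 .. 1) contributes
    # six walks of r elements each, i.e. 6*r universes, plus 1 for the centre,
    # so each axial level holds 3*n_rings*(n_rings - 1) + 1 entries in total.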
def get_unique_universes(self):
"""Determine all unique universes in the lattice
Returns
-------
universes : collections.OrderedDict
Dictionary whose keys are universe IDs and values are
:class:`openmc.Universe` instances
"""
univs = OrderedDict()
for k in range(len(self._universes)):
for j in range(len(self._universes[k])):
if isinstance(self._universes[k][j], openmc.Universe):
u = self._universes[k][j]
univs[u._id] = u
else:
for i in range(len(self._universes[k][j])):
u = self._universes[k][j][i]
assert isinstance(u, openmc.Universe)
univs[u._id] = u
if self.outer is not None:
univs[self.outer._id] = self.outer
return univs
def get_nuclides(self):
"""Returns all nuclides in the lattice
Returns
-------
|
dilawar/moose-core
|
python/moose/neuroml2/hhfit.py
|
Python
|
gpl-3.0
| 8,486
| 0.001414
|
# -*- coding: utf-8 -*-
# hhfit.py ---
# Description:
# Author:
# Maintainer:
# Created: Tue May 21 16:31:56 2013 (+0530)
# Commentary:
# Functions for fitting common equations for Hodgkin-Huxley type gate
# equations.
import traceback
import warnings
import numpy as np
import logging
logger_ = logging.getLogger('moose.nml2.hhfit')
try:
import scipy.optimize as _SO
except ImportError:
raise RuntimeError("To use this feature/module, please install scipy")
def exponential2(x, a, scale, x0, y0=0):
res = a * np.exp((x - x0) / scale) + y0
#print('============ Calculating exponential2 for %s, a=%s, scale=%s, x0=%s, y0=%s; = %s'%(x, a, scale, x0, y0, res))
return res
def exponential(x, a, k, x0, y0=0):
res = a * np.exp(k * (x - x0)) + y0
#print('============ Calculating exponential for %s, a=%s, k=%s, x0=%s, y0=%s; = %s'%(x, a, k, x0, y0, res))
return res
def sigmoid2(x, a, scale, x0, y0=0):
res = a / (np.exp(-1 * (x - x0) / scale) + 1.0) + y0
#print('============ Calculating sigmoid for %s, a=%s, scale=%s, x0=%s, y0=%s; = %s'%(x, a, scale, x0, y0, res))
return res
def sigmoid(x, a, k, x0, y0=0):
res = a / (np.exp(k * (x - x0)) + 1.0) + y0
#print('============ Calculating sigmoid for %s, a=%s, k=%s, x0=%s, y0=%s; = %s'%(x, a, k, x0, y0, res))
return res
def linoid2(x, a, scale, x0, y0=0):
"""The so called linoid function. Called explinear in neuroml."""
denominator = 1 - np.exp(-1 * (x - x0) / scale)
# Linoid often includes a zero denominator - we need to fill those
# points with interpolated values (interpolation is simpler than
# finding limits).
ret = (a / scale) * (x - x0) / denominator
infidx = np.flatnonzero((ret == np.inf) | (ret == -np.inf))
if len(infidx) > 0:
for ii in infidx:
if ii == 0:
ret[ii] = ret[ii + 1] - (ret[ii + 2] - ret[ii + 1])
            elif ii == len(ret) - 1:
                ret[ii] = ret[ii - 1] + (ret[ii - 1] - ret[ii - 2])
            else:
                ret[ii] = (ret[ii - 1] + ret[ii + 1]) * 0.5
res = ret + y0
#print('============ Calculating linoid2 for %s, a=%s, scale=%s, x0=%s, y0=%s; res=%s'%(x, a, scale, x0, y0,res))
return res
def linoid(x, a, k, x0, y0=0):
"""The so called linoid function. Called explinear in neuroml."""
denominator = np.exp(k * (x - x0)) - 1.0
# Linoid often includes a zero denominator - we need to fill those
# points with interpolated values (interpolation is simpler than
# finding limits).
ret = a * (x - x0) / denominator
infidx = np.flatnonzero((ret == np.inf) | (ret == -np.inf))
if len(infidx) > 0:
for ii in infidx:
if ii == 0:
ret[ii] = ret[ii + 1] - (ret[ii + 2] - ret[ii + 1])
            elif ii == len(ret) - 1:
                ret[ii] = ret[ii - 1] + (ret[ii - 1] - ret[ii - 2])
            else:
                ret[ii] = (ret[ii - 1] + ret[ii + 1]) * 0.5
res = ret + y0
#print('============ Calculating linoid for %s, a=%s, k=%s, x0=%s, y0=%s; res=%s'%(x, a, k, x0, y0,res))
return res
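# Worked limit (hedged, not from the original source): at x == x0 the
# denominator exp(k*(x - x0)) - 1 vanishes, so the raw expression is inf;
# analytically lim_{x->x0} a*(x - x0)/(exp(k*(x - x0)) - 1) = a/k, which the
# neighbour interpolation above approximates on a sufficiently fine grid.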
def double_exp(x, a, k1, x1, k2, x2, y0=0):
"""For functions of the form:
a / (exp(k1 * (x - x1)) + exp(k2 * (x - x2)))
"""
ret = np.zeros(len(x))
try:
ret = a / (np.exp(k1 * (x - x1)) + np.exp(k2 * (x - x2))) + y0
except RuntimeWarning as e:
logger_.warn(e)
return ret
# Map from the above functions to corresponding neuroml class
fn_rate_map = {
exponential: 'HHExpRate',
sigmoid: 'HHSigmoidRate',
linoid: 'HHExpLinearRate',
double_exp: None,
}
# These are default starting parameter values
fn_p0_map = {
exponential: (1.0, -100, 20e-3, 0.0),
sigmoid: (1.0, 1.0, 0.0, 0.0),
linoid: (1.0, 1.0, 0.0, 0.0),
double_exp: (1e-3, -1.0, 0.0, 1.0, 0.0, 0.0),
}
def randomized_curve_fit(fn, x, y, maxiter=10, best=True):
"""Repeatedly search for a good fit for common gate functions for
HHtype channels with randomly generated initial parameter
set. This function first tries with default p0 for fn. If that
fails to find a good fit, (correlation coeff returned by curve_fit
being inf is an indication of this), it goes on to generate random
p0 arrays and try scipy.optimize.curve_fit using this p0 until it
finds a good fit or the number of iterations reaches maxiter.
Ideally we should be doing something like stochastic gradient
descent, but I don't know if that might have performance issue in
pure python. The random parameterization in the present function
uses uniformly distributed random numbers within the half-open
interval [min(x), max(x)). The reason for choosing this: the
offset used in the exponential parts of Boltzman-type/HH-type
equations are usually within the domain of x. I also invert the
second entry (p0[1], because it is always (one of) the scale
factor(s) and usually 1/v for some v in the domain of x. I have
not tested the utility of this inversion. Even without this
inversion, with maxiter=100 this function is successful for the
test cases.
Parameters
----------
x: ndarray
values of the independent variable
y: ndarray
sample values of the dependent variable
maxiter: int
maximum number of iterations
best: bool
if true, repeat curve_fit for maxiter and return the case of least
squared error.
Returns
-------
    The return value of the scipy.optimize.curve_fit call that succeeded, or
    that of the last attempt if maxiter iterations are reached.
"""
bad = True
p0 = fn_p0_map[fn]
p = None
p_best = None
min_err = 1e10 # A large value as placeholder
for ii in range(maxiter):
try:
p = _SO.curve_fit(fn, x, y, p0=p0)
except (RuntimeError, RuntimeWarning):
p = None
# The last entry returned by scipy.optimize.leastsq used by
# curve_fit is 1, 2, 3 or 4 if it succeeds.
bad = (p is None) or (p[1] == np.inf).any()
if not bad:
if not best:
return p
err = sum((y - fn(x, *tuple(p[0])))**2)
if err < min_err:
min_err = err
p_best = p
p0 = np.random.uniform(low=min(x),
high=max(x),
size=len(fn_p0_map[fn]))
if p0[1] != 0.0:
p0[1] = 1 / p0[1] # k = 1/v_scale - could help faster convergence
if p_best is None:
if p is not None:
msg = p[-2]
else:
msg = ''
        warnings.warn(
'Maximum iteration %d reached. Could not find a decent fit. %s' %
(maxiter, msg), RuntimeWarning)
return p_best
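# Minimal usage sketch (hedged -- synthetic data, not part of this module):
#   import numpy as np
#   x = np.linspace(-0.1, 0.1, 200)
#   y = sigmoid(x, 1.0, -100.0, 0.02) + np.random.normal(0.0, 0.01, len(x))
#   popt, pcov = randomized_curve_fit(sigmoid, x, y, maxiter=20)
#   # popt should land near (1.0, -100.0, 0.02, 0.0)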
def find_ratefn(x, y, **kwargs):
"""Find the function that fits the rate function best. This will try
exponential, sigmoid and linoid and return the best fit.
Needed until NeuroML2 supports tables or MOOSE supports
functions.
Parameters
----------
x: 1D array
independent variable.
y: 1D array
function values.
**kwargs: keyword arguments
passed to randomized_curve_fit.
Returns
-------
best_fn: function
the best fit function.
best_p: tuple
the optimal parameter values for the best fit function.
"""
rms_error = 1e10 # arbitrarily setting this
best_fn = None
best_p = None
for fn in fn_rate_map:
p = randomized_curve_fit(fn, x, y, **kwargs)
if p is None:
continue
popt = p[0]
error = y - fn(x, *popt)
erms = np.sqrt(np.mean(error**2))
# Ideally I want a fuzzy selection criterion here - a
# preference for fewer parameters, but if the errors are
# really small then we go for functions with more number of
# parameters. Some kind of weighted decision would have been
# nice. I am arbitrarily setting less than 0.1% relative error
# as a strong argument for taking a longer parameter function
# as a really better fit. Even with 1%, double exponential
# betters d
|
UrbanCCD-UChicago/plenario
|
plenario/models/SensorNetwork.py
|
Python
|
mit
| 8,829
| 0.00068
|
import json
from geoalchemy2 import Geometry
from sqlalchemy import BigInteger, Boolean, Column, DateTime, Float, ForeignKey, ForeignKeyConstraint, String, Table, \
func as sqla_fn
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, JSONB
from sqlalchemy.orm import relationship
from plenario.database import postgres_base, postgres_engine, postgres_session, redshift_base
sensor_to_node = Table(
'sensor__sensor_to_node',
postgres_base.metadata,
Column('sensor', String, ForeignKey('sensor__sensor_metadata.name')),
Column('network', String),
Column('node', String),
ForeignKeyConstraint(
['network', 'node'],
['sensor__node_metadata.sensor_network', 'sensor__node_metadata.id']
)
)
feature_to_network = Table(
'sensor__feature_to_network',
postgres_base.metadata,
Column('feature', String, ForeignKey('sensor__feature_metadata.name')),
Column('network', String, ForeignKey('sensor__network_metadata.name'))
)
def knn(lng, lat, k, network, sensors):
"""Execute a spatial query to select k nearest neighbors given some point.
:param lng: (float) longitude
:param lat: (float) latitude
:param k: (int) number of results to return
:returns: (list) of nearest k neighbors
"""
# Convert lng-lat to geojson point
point = "'" + json.dumps({
'type': 'Point',
'coordinates': [lng, lat]
}) + "'"
# How many to limit the initial bounding box query to
k_10 = k * 10
# Based off snippet provided on pg 253 of PostGIS In Action (2nd Edition)
query = """
WITH bbox_results AS (
SELECT
node,
location,
array_agg(sensor) AS sensors,
(SELECT ST_SetSRID(ST_GeomFromGeoJSON({geojson}), 4326)) AS ref_geom
FROM
sensor__node_metadata JOIN sensor__sensor_to_node
ON id=node
WHERE
sensor_network = '{network}'
GROUP BY
node,
location
ORDER BY
location <#> (SELECT ST_SetSRID(ST_GeomFromGeoJSON ({geojson}), 4326))
LIMIT {k_10}
)
SELECT
node,
RANK() OVER(ORDER BY ST_Distance(location, ref_geom)) AS act_r
FROM bbox_results
WHERE
sensors && '{sensors}'::VARCHAR[]
ORDER BY act_r
LIMIT {k};
""".format(
geojson=point,
network=network,
k=k,
k_10=k_10,
sensors='{' + ','.join(sensors) + '}'
)
return postgres_engine.execute(query).fetchall()
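# Illustrative call (hedged -- the network and sensor names are made up):
#   knn(lng=-87.6, lat=41.9, k=5,
#       network='array_of_things', sensors=['tmp112', 'hmc5883l'])
# The bounding-box CTE prefilters the k*10 nearest candidates by index
# distance (<#>), then the outer query re-ranks them by true ST_Distance and
# keeps the k best that carry at least one of the requested sensors.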
class NetworkMeta(postgres_base):
__tablename__ = 'sensor__network_metadata'
name = Column(String, primary_key=True)
nodes = relationship('NodeMeta')
info = Column(JSONB)
@staticmethod
def index():
networks = postgres_session.query(NetworkMeta)
return [network.name.lower() for network in networks]
def __repr__(self):
return '<Network {!r}>'.format(self.name)
def tree(self):
sensor_tree_fn = sqla_fn.network_tree(self.name)
sensor_tree_result_proxy = self.query.session.execute(sensor_tree_fn)
return sensor_tree_result_proxy.scalar()
def sensors(self) -> set:
keys = []
for sensor in self.tree().values():
keys += sensor
return set(keys)
def features(self):
keys = []
for sensor in self.tree().values():
for feature in sensor.values():
keys += feature.values()
return set([k.split('.')[0] for k in keys])
class NodeMeta(postgres_base):
__tablename__ = 'sensor__node_metadata'
id = Column(String, primary_key=True)
sensor_network = Column(String, ForeignKey('sensor__network_metadata.name'), primary_key=True)
location = Column(Geometry(geometry_type='POINT', srid=4326))
    sensors = relationship('SensorMeta', secondary='sensor__sensor_to_node')
info = Column(JSONB)
address = Column(String)
column_editable_list = ('sensors', 'info')
@staticmethod
def all(network_name):
query = NodeMeta.query.filter(NodeMeta.sensor_network == network_name)
return query.all()
@staticmethod
def index(network_name):
return [node.id for node in NodeMeta.all(network_name)]
@staticmethod
def nearest_neighbor_to(lng, lat, network, features):
sensors = set()
for feature in features:
feature = FeatureMeta.query.get(feature)
sensors = sensors | feature.sensors()
return knn(
lng=lng,
lat=lat,
network=network,
sensors=sensors,
k=10
)
@staticmethod
def within_geojson(network: NetworkMeta, geojson: str):
geom = sqla_fn.ST_GeomFromGeoJSON(geojson)
within = NodeMeta.location.ST_Within(geom)
query = NodeMeta.query.filter(within)
query = query.filter(NodeMeta.sensor_network == network.name)
return query
@staticmethod
def sensors_from_nodes(nodes):
sensors_list = []
for node in nodes:
sensors_list += node.sensors
return set(sensors_list)
def features(self) -> set:
feature_set = set()
for feature in self.tree().values():
feature_set.update(feature.keys())
return feature_set
def __repr__(self):
return '<Node {!r}>'.format(self.id)
def tree(self):
return {s.name: s.tree() for s in self.sensors}
class SensorMeta(postgres_base):
__tablename__ = 'sensor__sensor_metadata'
name = Column(String, primary_key=True)
observed_properties = Column(JSONB)
info = Column(JSONB)
def features(self) -> set:
"""Return the features that this sensor reports on.
"""
return {e.split('.')[0] for e in self.tree()}
def __repr__(self):
return '<Sensor {!r}>'.format(self.name)
def tree(self):
return {v: k for k, v in self.observed_properties.items()}
class FeatureMeta(postgres_base):
__tablename__ = 'sensor__feature_metadata'
name = Column(String, primary_key=True)
networks = relationship('NetworkMeta', secondary='sensor__feature_to_network')
observed_properties = Column(JSONB)
def types(self):
"""Return a dictionary with the properties mapped to their types.
"""
return {e['name']: e['type'] for e in self.observed_properties}
def sensors(self) -> set:
"""Return the set of sensors that report on this feature.
"""
results = set()
for network in self.networks:
for node in network.tree().values():
for sensor, properties in node.items():
if self.name in {p.split('.')[0] for p in properties}:
results.add(sensor)
return results
@staticmethod
def index(network_name=None):
features = []
for node in postgres_session.query(NodeMeta).all():
if network_name is None or node.sensor_network.lower() == network_name.lower():
for sensor in node.sensors:
for prop in sensor.observed_properties.values():
features.append(prop.split('.')[0].lower())
return list(set(features))
@staticmethod
def properties_of(feature):
query = postgres_session.query(FeatureMeta.observed_properties).filter(
FeatureMeta.name == feature)
return [feature + '.' + prop['name'] for prop in query.first().observed_properties]
def mirror(self):
"""Create feature tables in redshift for all the networks associated
with this feature.
"""
for network in self.networks:
self._mirror(network.name)
def _mirror(self, network_name: str):
"""Create a feature table in redshift for the specified network.
"""
columns = []
for feature in self.observed_properties:
column_name = feature['name']
column_type = database_types[feature['type'].upper()]
columns.append(Column(column_name, column_type, default=None))
redshift_table = Table(
'{}__{}'.format(network_name, self.name),
|
MehmetNuri/ozgurlukicin
|
beyin2/__init__.py
|
Python
|
gpl-3.0
| 188
| 0.005376
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 TÜBİTAK UEKAE
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt.
|
geographika/mappyfile
|
docs/scripts/class_diagrams.py
|
Python
|
mit
| 3,102
| 0.000645
|
r"""
Create MapServer class diagrams
Requires https://graphviz.gitlab.io/_pages/Download/Download_windows.html
https://stackoverflow.com/questions/1494492/graphviz-how-to-go-from-dot-to-a-graph
For DOT languge see http://www.graphviz.org/doc/info/attrs.html
cd C:\Program Files (x86)\Graphviz2.38\bin
dot -Tpng D:\GitHub\mappyfile\mapfile_classes.dot -o outfile.png
outfile.png
For Entity Relationship diagrams:
https://graphviz.readthedocs.io/en/stable/examples.html#er-py
"""
import os
import pydot
# import pprint
FONT = "Lucida Sans"
def graphviz_setup(gviz_path):
os.environ['PATH'] = gviz_path + ";" + os.environ['PATH']
def add_child(graph, child_id, child_label, parent_id, colour):
"""
http://www.graphviz.org/doc/info/shapes.html#polygon
"""
node = pydot.Node(child_id, style="filled", fillcolor=colour, label=child_label, shape="polygon", fontname=FONT)
graph.add_node(node)
graph.add_edge(pydot.Edge(parent_id, node))
def add_children(graph, parent_id, d, level=0):
blue = "#6b6bd1"
white = "#fdfefd"
green = "#33a333"
colours = [blue, white, green] * 3
for class_, children in d.items():
colour = colours[level]
child_label = class_
child_id = parent_id + "_" + class_
add_child(graph, child_id, child_label, parent_id, colour)
add_children(graph, child_id, children, level+1)
def save_file(graph, fn):
filename = "%s.png" % fn
graph.write_png(filename)
graph.write("%s.dot" % fn)
os.startfile(filename)
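# Portability note (hedged): os.startfile is Windows-only, consistent with the
# Graphviz paths used in this script; elsewhere one would do something like
#   import subprocess; subprocess.call(["xdg-open", filename])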
def main(gviz_path, layer_only=False):
graphviz_setup(gviz_path)
graph = pydot.Dot(graph_type='digraph', rankdir="TB")
layer_children = {
'CLASS': {
'LABEL': {'STYLE': {}},
'CONNECTIONOPTIONS': {},
'LEADER': {'STYLE': {}},
'STYLE': {},
'VALIDATION': {}
},
'CLUSTER': {},
'COMPOSITE': {},
'FEATURE': {'POINTS': {}},
'GRID': {},
'JOIN': {},
'METADATA': {},
'PROJECTION': {},
'SCALETOKEN': {'VALUES': {}},
'VALIDATION': {}
}
# pprint.pprint(layer_children)
classes = {
"MAP": {
"LAYER": layer_children,
'LEGEND': {'LABEL': {}},
'PROJECTION': {},
'QUERYMAP': {},
'REFERENCE': {},
'SCALEBAR': {'LABEL': {}},
'SYMBOL': {},
'WEB': {'METADATA': {}, 'VALIDATION': {}}
}
}
if layer_only:
root = "LAYER"
classes = classes["MAP"]
fn = "layer_classes"
else:
fn = "map_classes"
root, = classes.keys()
node = pydot.Node(root, style="filled", fillcolor="#33a333", label=root, fontname=FONT, shape="polygon")
graph.add_node(node)
add_children(graph, root, classes[root])
save_file(graph, fn)
if __name__ == "__main__":
gviz_path = r"C:\Program Files (x86)\Graphviz2.38\bin"
main(gviz_path, True)
main(gviz_path, False)
print("Done!")
|
caesar2164/edx-platform
|
openedx/stanford/lms/djangoapps/instructor/urls.py
|
Python
|
agpl-3.0
| 1,465
| 0.004096
|
from django.conf.urls import url
urlpatterns = [
url(
r'delete_report_download',
'openedx.stanford.lms.djangoapps.instructor.views.api.delete_report_download',
name='delete_report_download',
),
url(
r'^get_blank_lti$',
'openedx.stanford.lms.djangoapps.instructor.views.api.get_blank_lti',
name='get_blank_lti',
),
url(
r'get_course_forums_usage',
'openedx.stanford.lms.djangoapps.instructor.views.api.get_course_forums_usage_view',
name='get_course_forums_usage',
),
url(
r'get_ora2_responses/(?:(?P<include_email>\w+)/)?$',
'openedx.stanford.lms.djangoapps.instructor.views.api.get_ora2_responses_view',
name='get_ora2_responses',
),
url(
r'get_student_forums_usage',
'openedx.stanford.lms.djangoapps.instructor.views.api.get_student_forums_usage_view',
name='get_student_forums_usage',
),
url(
r'^get_student_responses$',
'openedx.stanford.lms.djangoapps.instructor.views.api.get_student_responses_view',
name='get_student_responses',
),
url(
r'^graph_course_forums_usage',
'openedx.stanford.lms.djangoapps.instructor.views.api.graph_course_forums_usage',
name='graph_course_forums_usage',
),
url(
r'^upload_lti$',
'openedx.stanford.lms.djangoapps.instructor.views.api.upload_lti',
name='upload_lti',
),
]
|
buma/TogglViz
|
setup.py
|
Python
|
gpl-2.0
| 438
| 0
|
from setuptools import setup
setup(
name='TogglViz',
    version='0.1dev',
author='Marko Burjek',
packages=['togglviz', ],
scripts=['bin/fill.py', ],
license='LICENSE.txt',
    long_description=open('README.txt').read(),
install_requires=[
"SQLAlchemy==0.7.9",
"docopt==0.5.0",
"schema==0.1.1",
"python-dateutil==2.1",
],
)
|
tylerdave/reqcli
|
reqcli/cli.py
|
Python
|
mit
| 1,667
| 0.004199
|
import click
import requests
@click.command()
@click.argument('url')
@click.option('--show-headers', '-H', is_flag=True, default=False)
@click.option('--show-status', '-S', is_flag=True, default=False)
@click.option('--quiet', '-Q', is_flag=True, default=False)
@click.option('--allow-redirects/--no-allow-redirects', default=True)
@click.option('--verbose', '-v', is_flag=True, default=False)
def cli(url, show_headers, show_status, quiet, allow_redirects, verbose):
# Make the request
if verbose:
click.secho('Making HTTP request to "{0}"...'.format(url), err=True, fg='white')
try:
response = requests.get(url, allow_redirects=allow_redirects)
response.raise_for_status()
except requests.exceptions.RequestException as e:
        click.secho(str(e), err=True, fg='yellow')
raise click.Abort()
except Exception as e:
        click.secho(str(e), err=True, fg='red')
raise click.Abort()
status_colors = {
2: 'green',
3: 'blue',
4: 'yellow',
5: 'red',
}
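    # The first digit of the status code picks the colour (e.g. 404 -> 4 ->
    # 'yellow'); anything outside 2xx-5xx yields None, i.e. click's default.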
# Show the response status
if show_status:
        status_color = status_colors.get(int(response.status_code) // 100)
click.secho('Status: {0}'.format(response.status_code), err=True, fg=status_color)
# Show the response headers
if show_headers:
click.echo(format_headers(response.headers), err=True)
# Show the response body
if not quiet:
click.echo(response.text)
def format_headers(headers):
    formatted = ['{0}: {1}'.format(k, v) for k, v in headers.items()]
    return '\n'.join(formatted)
if __name__ == '__main__':
    cli()
|
mfherbst/spack
|
lib/spack/spack/util/environment.py
|
Python
|
lgpl-2.1
| 3,500
| 0
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import contextlib
import os
system_paths = ['/', '/usr', '/usr/local']
suffixes = ['bin', 'bin64', 'include', 'lib', 'lib64']
system_dirs = [os.path.join(p, s) for s in suffixes for p in system_paths] + \
system_paths
def is_system_path(path):
"""Predicate that given a path returns True if it is a system path,
False otherwise.
Args:
path (str): path to a directory
Returns:
True or False
"""
return os.path.normpath(path) in system_dirs
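# e.g. is_system_path('/usr/local/lib64') -> True,
#      is_system_path('/opt/lib') -> False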
def filter_system_paths(paths):
return [p for p in paths if not is_system_path(p)]
def get_path(name):
path = os.environ.get(name, "").strip()
if path:
return path.split(":")
else:
return []
def env_flag(name):
if name in os.environ:
value = os.environ[name].lower()
return value == "true" or value == "1"
return False
def path_set(var_name, directories):
path_str = ":".join(str(dir) for dir in directories)
os.environ[var_name] = path_str
def path_put_first(var_name, directories):
"""Puts the provided directories first in the path, adding them
if they're not already there.
"""
path = os.environ.get(var_name, "").split(':')
for dir in directories:
if dir in path:
path.remove(dir)
new_path = tuple(directories) + tuple(path)
path_set(var_name, new_path)
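# Example (hypothetical values): with PATH == "/usr/bin:/opt/bin",
#   path_put_first("PATH", ["/opt/bin", "/home/me/bin"])
# leaves PATH as "/opt/bin:/home/me/bin:/usr/bin" -- existing entries are
# moved to the front rather than duplicated.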
def dump_environment(path):
"""Dump the current environment out to a file."""
with open(path, 'w') as env_file:
for key, val in sorted(os.environ.items()):
env_file.write('export %s="%s"\n' % (key, val))
@contextlib.contextmanager
def set_env(**kwargs):
"""Temporarily sets and restores environment variables.
Variables can be set as keyword arguments to this function.
"""
saved = {}
for var, value in kwargs.items():
if var in os.environ:
saved[var] = os.environ[var]
if value is None:
if var in os.environ:
del os.environ[var]
else:
os.environ[var] = value
yield
for var, value in kwargs.items():
if var in saved:
os.environ[var] = saved[var]
else:
if var in os.environ:
del os.environ[var]
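# Usage sketch (variable names are illustrative):
#   with set_env(SPACK_DEBUG='1', PATH=None):
#       ...  # SPACK_DEBUG is set and PATH is unset inside the block
#   # afterwards both are restored to their previous values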
|
svenhertle/django_image_exif
|
image_exif/migrations/0001_initial.py
|
Python
|
mit
| 1,140
| 0.005263
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('filer', '0002_auto_20150606_2003'),
]
operations = [
migrations.CreateModel(
name='ExifData',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('focal_length', models.CharField(max_length=100, verbose_name='Focal length', blank=True)),
('iso', models.CharField(max_length=100, verbose_name='ISO', blank=True)),
('fraction', models.CharField(max_length=100, verbose_name='Fraction', blank=True)),
('exposure_time', models.CharField(max_length=100, verbose_name='Exposure time', blank=True)),
('image', models.OneToOneField(verbose_name='Image', to='filer.Image')),
],
options={
'verbose_name': 'EXIF Data',
'verbose_name_plural': 'EXIF data',
},
bases=(models.Model,),
),
]
|
CPSC491FileMaker/project
|
calTimer.QThread.py
|
Python
|
gpl-2.0
| 717
| 0.013947
|
#rewrite of original calTimer to use qthreads as opposed to native python threads
#needed to make UI changes (impossible from native)
#also attempting to alleviate need for sigterm to stop perm loop
from PyQt4 import QtCore
import time,os,ctypes
import sys
class calTimer(QtCore.QThread):
xml_file = './data/data.xml'
fileSize = os.stat(xml_file)
def initFileSize(self):
print "initfilesize run"
fileToCheck = os.stat(self.xml_file)
self.fileSize = fileToCheck.st_size
def run(self):
self.initFileSize()
testFileSize = self.fileSize
while testFileSize == self.fileSize:
print "No change - sleep 3"
#time.sleep(3)
|
serefimov/billboards
|
billboards/boards/parsing/__init__.py
|
Python
|
mit
| 57
| 0.017544
|
# -*- coding: utf-8 -*-
__author__ = 'Sergey Efimov'
|
marco-lancini/Showcase
|
app_collaborations/options.py
|
Python
|
mit
| 5,831
| 0.008918
|
from django.utils.translation import ugettext as _
#=========================================================================
# HELPERS
#=========================================================================
def get_display(key, list):
d = dict(list)
if key in d:
return d[key]
return None
def get_creative_fields(category=None):
"""
Access MAP_CATEGORY_FIELDS and extract creative fields
:param category: filter
:returns: all the creative fields belonging to the specified category. If no category is specified, returns all the creative fields
"""
def __get_values(res, v):
val = list(v)
if isinstance(v[0], tuple):
for el in val:
res = res + ((el[0], el[1]),)
else:
res = res + ((v[0],v[1]),)
return res
# Extract fields
res = ()
for k,v in MAP_CATEGORY_FIELDS.items():
if category != None and category != k:
pass
else:
res = __get_values(res, v)
# Sort alphabetically
return sorted(res, key=lambda x: x[1])
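# Example (hedged): get_creative_fields(('AR', 'Architecture')) yields the
# three architecture pairs sorted by label, roughly
#   [('A1', u'Architecture'), ('A2', u'Landscape Design'), ('A3', u'Street Design')]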
def get_creative_field_verbose(id):
return get_display(id, CREATIVE_FIELDS)
def get_category_verbose(id):
return get_display(id, CATEGORIES)
#=========================================================================
# OPTIONS
#=========================================================================
"""
Dictionary of tuples:
- each key is a tuple representing a category
- each item is a tuple of tuples, each one representing a creative field
"""
MAP_CATEGORY_FIELDS = {
('AR', 'Architecture'): (
('A1', _('Architecture')),
('A2', _('Landscape Design')),
('A3', _('Street Design')),
),
('BS', 'Business'): (
('B1', _('Advertising')),
('B2', _('Branding')),
('B3', _('Entrepreneurship')),
),
('CY', 'Cinematography'): (
('C1', _('Cinematography')),
('C2', _('Directing')),
('C3', _('Film')),
('C4', _('Storyboarding')),
),
('CU', 'Culinary Arts'): (
('U1', _('Cooking')),
        ('U2', _('Baking')),
('U3', _('Food and Beverage')),
('U4', _('Food Critic')),
('U5', _('Food Instructor')),
('U6', _('Food Styling')),
('U7', _('Food Writing')),
),
('DS', 'Design'): (
('D1', _('Automotive Design')),
('D2', _('Exhibition Design')),
('D3', _('Furniture Design')),
('D4', _('Industrial Design')),
('D5', _('Interior Design')),
('D6', _('Light Design')),
('D7', _('Packaging')),
),
('EN', 'Engineering'): (
('E1', _('Engineering')),
('E2', _('Information Architecture')),
('E3', _('Industrial Design')),
('E4', _('Product Design')),
),
('FH', 'Fashion'): (
('F1', _('Fashion')),
('F2', _('Fashion Styling')),
('F3', _('Jewelry Design')),
('F4', _('MakeUp Arts')),
),
('FI', 'Fine Arts'): (
('R1', _('Calligraphy')),
('R2', _('Comics')),
('R3', _('Drawing')),
('R4', _('Illustration')),
('R5', _('Mosaics')),
('R6', _('Painting')),
('R7', _('Sculpting')),
),
('GR', 'Graphics'): (
('G1', _('Animation')),
('G2', _('Computer Animation')),
('G3', _('Digital Art')),
('G4', _('Graphic Design')),
('G5', _('Icon Design')),
('G6', _('Motion Graphics')),
('G7', _('Visual Effects')),
),
    ('IT', 'Information Technology'): (
('I1', _('Mobile Programming')),
('I2', _('Programming')),
('I3', _('Software Engineering')),
('I4', _('User Interface Design')),
('I5', _('Videogame Design')),
('I6', _('Web Design')),
('I7', _('Web Development')),
),
('JU', 'Journalism'): (
('J1', _('Journalism')),
('J2', _('Photojournalism')),
('J3', _('Photoreporting')),
),
('MA', 'Manual Arts'): (
('M1', _('Crafts')),
('M2', _('Graffiti')),
),
('PF', 'Performing Arts'): (
('P1', _('Acting')),
('P2', _('Dancing')),
('P3', _('Music')),
),
('PH', 'Photography'): (
('H1', _('Digital Photography')),
('H2', _('Photography')),
),
('WR', 'Writing'): (
('W1', _('Character Design')),
('W2', _('Copywriting')),
('W3', _('Illustration')),
('W4', _('Typography')),
('W5', _('Writing')),
),
}
# List of categories
CATEGORIES = MAP_CATEGORY_FIELDS.keys()
# List of creative fields
CREATIVE_FIELDS = get_creative_fields()
|
TelekomCloud/virt-manager
|
tests/nodedev.py
|
Python
|
gpl-2.0
| 9,369
| 0.000534
|
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import os.path
import unittest
from virtinst import NodeDeviceParser as nodeparse
from virtinst import VirtualHostDevice
from tests import utils
conn = utils.open_testdriver()
unknown_xml = """
<device>
<name>foodevice</name>
<parent>computer</parent>
<capability type='frobtype'>
<hardware>
<vendor>LENOVO</vendor>
</hardware>
</capability>
</device>
"""
class TestNodeDev(unittest.TestCase):
def _nodeDevFromName(self, devname):
node = conn.nodeDeviceLookupByName(devname)
xml = node.XMLDesc(0)
return nodeparse.parse(xml)
def _testCompare(self, devname, vals, devxml=None):
if devxml:
dev = nodeparse.parse(devxml)
else:
dev = self._nodeDevFromName(devname)
for attr in vals.keys():
self.assertEqual(vals[attr], getattr(dev, attr))
def _testNode2DeviceCompare(self, nodename, devfile,
nodedev=None, is_dup=False):
devfile = os.path.join("tests/nodedev-xml/devxml", devfile)
if not nodedev:
nodedev = self._nodeDevFromName(nodename)
dev = VirtualHostDevice.device_from_node(conn, nodedev=nodedev,
is_dup=is_dup)
utils.diff_compare(dev.get_xml_config() + "\n", devfile)
def testSystemDevice(self):
devname = "computer"
vals = {"hw_vendor": "LENOVO", "hw_version": "ThinkPad T61",
"hw_serial": "L3B2616",
"hw_uuid": "97e80381-494f-11cb-8e0e-cbc168f7d753",
"fw_vendor": "LENOVO", "fw_version": "7LET51WW (1.21 )",
"fw_date": "08/22/2007",
"device_type": nodeparse.CAPABILITY_TYPE_SYSTEM,
"name": "computer", "parent": None}
self._testCompare(devname, vals)
def testNetDevice1(self):
devname = "net_00_1c_25_10_b1_e4"
vals = {"name": "net_00_1c_25_10_b1_e4", "parent": "pci_8086_1049",
"device_type": nodeparse.CAPABILITY_TYPE_NET,
"interface": "eth0", "address": "00:1c:25:10:b1:e4",
"capability_type": "80203"}
self._testCompare(devname, vals)
def testNetDevice2(self):
devname = "net_00_1c_bf_04_29_a4"
vals = {"name": "net_00_1c_bf_04_29_a4", "parent": "pci_8086_4227",
"device_type": nodeparse.CAPABILITY_TYPE_NET,
"interface": "wlan0", "address": "00:1c:bf:04:29:a4",
"capability_type": "80211"}
self._testCompare(devname, vals)
def testPCIDevice1(self):
devname = "pci_1180_592"
vals = {"name": "pci_1180_592", "parent": "pci_8086_2448",
"device_type": nodeparse.CAPABILITY_TYPE_PCI,
"domain": "0", "bus": "21", "slot": "0", "function": "4",
"product_id": "0x0592", "vendor_id": "0x1180",
"product_name": "R5C592 Memory Stick Bus Host Adapter",
"vendor_name": "
|
Ricoh Co Ltd"}
self._testCompare(devname, vals)
def testPCIDevice2(self):
devname = "pci_8086_1049"
vals = {"name": "pci_8086_1049", "parent": "computer",
"device_type": nodeparse.CAPABIL
|
ITY_TYPE_PCI,
"domain": "0", "bus": "0", "slot": "25", "function": "0",
"product_id": "0x1049", "vendor_id": "0x8086",
"product_name": "82566MM Gigabit Network Connection",
"vendor_name": "Intel Corporation"}
self._testCompare(devname, vals)
def testUSBDevDevice1(self):
devname = "usb_device_781_5151_2004453082054CA1BEEE"
vals = {"name": "usb_device_781_5151_2004453082054CA1BEEE",
"parent": "usb_device_1d6b_2_0000_00_1a_7",
"device_type": nodeparse.CAPABILITY_TYPE_USBDEV,
"bus": "1", "device": "4", "product_id": '0x5151',
"vendor_id": '0x0781',
"vendor_name": "SanDisk Corp.",
"product_name": "Cruzer Micro 256/512MB Flash Drive"}
self._testCompare(devname, vals)
def testUSBDevDevice2(self):
devname = "usb_device_483_2016_noserial"
vals = {"name": "usb_device_483_2016_noserial",
"parent": "usb_device_1d6b_1_0000_00_1a_0",
"device_type": nodeparse.CAPABILITY_TYPE_USBDEV,
"bus": "3", "device": "2", "product_id": '0x2016',
"vendor_id": '0x0483',
"vendor_name": "SGS Thomson Microelectronics",
"product_name": "Fingerprint Reader"}
self._testCompare(devname, vals)
def testStorageDevice1(self):
devname = "storage_serial_SATA_WDC_WD1600AAJS__WD_WCAP95119685"
vals = {"name": "storage_serial_SATA_WDC_WD1600AAJS__WD_WCAP95119685",
"parent": "pci_8086_27c0_scsi_host_scsi_device_lun0",
"device_type": nodeparse.CAPABILITY_TYPE_STORAGE,
"block": "/dev/sda", "bus": "scsi", "drive_type": "disk",
"model": "WDC WD1600AAJS-2", "vendor": "ATA",
"size": 160041885696, "removable": False,
"hotpluggable": False, "media_available": False,
"media_size": 0, "media_label": None}
self._testCompare(devname, vals)
def testStorageDevice2(self):
devname = "storage_serial_SanDisk_Cruzer_Micro_2004453082054CA1BEEE_0_0"
vals = {"name": "storage_serial_SanDisk_Cruzer_Micro_2004453082054CA1BEEE_0_0",
"parent": "usb_device_781_5151_2004453082054CA1BEEE_if0_scsi_host_0_scsi_device_lun0",
"device_type": nodeparse.CAPABILITY_TYPE_STORAGE,
"block": "/dev/sdb", "bus": "usb", "drive_type": "disk",
"model": "Cruzer Micro", "vendor": "SanDisk", "size": 0,
"removable": True, "hotpluggable": True,
"media_available": True, "media_size": 12345678}
self._testCompare(devname, vals)
def testUSBBus(self):
devname = "usb_device_1d6b_1_0000_00_1d_1_if0"
vals = {"name": "usb_device_1d6b_1_0000_00_1d_1_if0",
"parent": "usb_device_1d6b_1_0000_00_1d_1",
"device_type": nodeparse.CAPABILITY_TYPE_USBBUS,
"number": "0", "classval": "9", "subclass": "0",
"protocol": "0"}
self._testCompare(devname, vals)
def testSCSIBus(self):
devname = "pci_8086_2829_scsi_host_1"
vals = {"name": "pci_8086_2829_scsi_host_1",
"parent": "pci_8086_2829",
"device_type": nodeparse.CAPABILITY_TYPE_SCSIBUS,
"host": "2"}
self._testCompare(devname, vals)
def testNPIV(self):
devname = "pci_10df_fe00_0_scsi_host"
vals = {"name": "pci_10df_fe00_0_scsi_host",
"device_type": nodeparse.CAPABILITY_TYPE_SCSIBUS,
"host": "4", "fc_host": True, "vport_ops" : True,
"wwnn": "20000000c9848141", "wwpn": "10000000c9848141"}
self._testCompare(devname, vals)
def testSCSIDevice(self):
devname = "pci_8086_2829_scsi_host_scsi_device_lun0"
vals = {"name": "pci_8086_2829_scsi_host_scsi_device_lun0",
"parent": "pci_8086_2829_scsi_host",
"host": "0", "bus": "0", "target": "0", "lun": "0",
"type": "disk"}
self._testCompare(devname, vals)
def testUnknownDevice(self):
        vals = {"name": "foodevice", "parent": "computer",
                "device_type": "frobtype"}
        self._testCompare(None, vals, devxml=unknown_xml)
|
jaja14/lab5
|
main/main.py
|
Python
|
mit
| 5,013
| 0.007181
|
# -*- coding: utf-8 -*-
import logging
from flask.ext import wtf
from google.appengine.api import mail
import flask
import config
import util
app = flask.Flask(__name__)
app.config.from_object(config)
app.jinja_env.line_statement_prefix = '#'
app.jinja_env.line_comment_prefix = '##'
app.jinja_env.globals.update(slugify=util.slugify)
app.jinja_env.globals.update(update_query_argument=util.update_query_argument)
import admin
import auth
import user
import contact
if config.DEVELOPMENT:
from werkzeug import debug
app.wsgi_app = debug.DebuggedApplication(app.wsgi_app, evalex=True)
###############################################################################
# Main page
###############################################################################
@app.route('/')
def welcome():
return flask.render_template('welcome.html', html_class='welcome')
###############################################################################
# Sitemap stuff
###############################################################################
@app.route('/sitemap.xml')
def sitemap():
response = flask.make_response(flask.render_template(
'sitemap.xml',
host_url=flask.request.host_url[:-1],
lastmod=config.CURRENT_VERSION_DATE.strftime('%Y-%m-%d'),
))
response.headers['Content-Type'] = 'application/xml'
return response
###############################################################################
# Profile stuff
###############################################################################
class ProfileUpdateForm(wtf.Form):
name = wtf.StringField('Name',
[wtf.validators.required()], filters=[util.strip_filter],
)
email = wtf.StringField('Email',
[wtf.validators.optional(), wtf.validators.email()],
filters=[util.email_filter],
)
@app.route('/_s/profile/', endpoint='profile_service')
@app.route('/profile/', methods=['GET', 'POST'])
@auth.login_required
def profile():
user_db = auth.current_user_db()
form = ProfileUpdateForm(obj=user_db)
if form.validate_on_submit():
form.populate_obj(user_db)
user_db.put()
return flask.redirect(flask.url_for('welcome'))
if flask.request.path.startswith('/_s/'):
return util.jsonify_model_db(user_db)
return flask.render_template(
'profile.html',
title=user_db.name,
html_class='profile',
form=form,
user_db=user_db,
has_json=True,
)
###############################################################################
# Feedback
###############################################################################
class FeedbackForm(wtf.Form):
subject = wtf.StringField('Subject',
[wtf.validators.required()], filters=[util.strip_filter],
)
message = wtf.TextAreaField('Message',
[wtf.validators.required()], filters=[util.strip_filter],
)
email = wtf.StringField('Email (optional)',
[wtf.validators.optional(), wtf.validators.email()],
filters=[util.email_filter],
)
@app.route('/feedback/', methods=['GET', 'POST'])
def feedback():
if not config.CONFIG_DB.feedback_email:
return flask.abort(418)
form = FeedbackForm(obj=auth.current_user_db())
if form.validate_on_submit():
mail.send_mail(
sender=config.CONFIG_DB.feedback_email,
to=config.CONFIG_DB.feedback_email,
subject='[%s] %s' % (
config.CONFIG_DB.brand_name,
form.subject.data,
),
reply_to=form.email.data or config.CONFIG_DB.feedback_email,
body='%s\n\n%s' % (form.message.data, form.email.data)
)
flask.flash('Thank you for your feedback!', category='success')
return flask.redirect(flask.url_for('welcome'))
return flask.render_template(
'feedback.html',
title='Feedback',
html_class='feedback',
form=form,
)
###############################################################################
# Error Handling
###############################################################################
@app.errorhandler(400) # Bad Request
@app.errorhandler(401) # Unauthorized
@app.errorhandler(403) # Forbidden
@app.errorhandler(404) # Not Found
@app.errorhandler(405) # Method Not Allowed
@app.errorhandler(410) # Gone
@app.errorhandler(418) # I'm a Teapot
@app.errorhandler(500) # Internal Server Error
def error_handler(e):
logging.exception(e)
try:
e.code
except AttributeError:
e.code = 500
e.name = 'Internal Server Error'
if flask.request.path.startswith('/_s/'):
return util.jsonpify({
'status': 'error',
'error_code': e.code,
'error_name': util.slugify(e.name),
'error_message': e.name,
'error_class': e.__class__.__name__,
}), e.code
return flask.render_template(
'error.html',
title='Error %d (%s)!!1' % (e.code, e.name),
html_class='error-page',
error=e,
), e.code
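# Illustrative behaviour (request paths are hypothetical): a browser hitting a
# missing page gets the rendered error.html with code 404, while an AJAX call
# such as GET /_s/missing receives a JSON body shaped like
# {"status": "error", "error_code": 404, "error_name": "...", ...}.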
if config.PRODUCTION:
@app.errorhandler(Exception)
def production_error_handler(e):
return error_handler(e)
|
TresAmigosSD/SMV
|
src/test/python/testModuleHash/after/src/main/python/stage/modules.py
|
Python
|
apache-2.0
| 4,078
| 0.012016
|
#
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import udl as lib
import same as unchanged
from smv import SmvApp, SmvModule, SmvHiveTable, SmvCsvFile
def sameFunc():
"""a function which is the same in before/after"""
return "I'm the same!"
def differentFunc():
"""a function that has different source in before/a
|
fter"""
return "I'm in after!"
class ChangeCode(SmvModule):
def requiresDS(self):
return []
def run(self, i):
return self.smvApp.createDF("k:String;v:Integer", "a,;b,3")
class AddComment(SmvModule):
# I ADDED A COMMENT, POP!
def requiresDS(self):
return []
def run(self,i):
return self.smvApp.createDF("k:String;v:Integer", "a,;b,5")
class DependencyB(SmvModule):
def requiresDS(self):
return []
def run(self,i):
return self.smvApp.createDF("k:String;v:Integer", "a,;b,215")
class Dependent(DependencyB):
def requiresDS(self):
return []
def run(self,i):
return self.smvApp.createDF("k:String;v:Integer", "a,;b,7")
class Upstream(SmvModule):
def requiresDS(self):
return []
def run(self,i):
return self.smvApp.createDF("k:String;v:Integer", "a,;b,46")
class DifferentLibrary(SmvModule):
def requiresDS(self):
return []
def requiresLib(self):
return [lib]
def run(self, i):
number = lib.getNumber()
return self.smvApp.createDF("k:String;v:Integer", "a,1;b,2;c:3").limit(number)
class SameLibrary(SmvModule):
def requiresDS(self):
return []
def requiresLib(self):
return [unchanged]
def run(self, i):
df = self.smvApp.createDF("k:String;v:Integer", "a,1;b,2;c:3")
return df.withColumn('new', unchanged.UnchangedFunction())
class SameFunc(SmvModule):
def requiresDS(self):
return []
def requiresLib(self):
return [sameFunc]
def run(self, i):
df = self.smvApp.createDF("k:String;v:Integer", "a,1;b,2;c:3")
return df.withColumn('new', sameFunc())
class DifferentFunc(SmvModule):
def requiresDS(self):
return []
def requiresLib(self):
return [differentFunc]
def run(self, i):
df = self.smvApp.createDF("k:String;v:Integer", "a,1;b,2;c:3")
return df.withColumn('new', differentFunc())
class Downstream(SmvModule):
def requiresDS(self):
return[Upstream]
def run(self,i):
return self.smvApp.createDF("k:String;v:Integer", "a,;b,30")
class Parent(SmvModule):
def requiresDS(self):
return[Upstream]
def run(self,i):
return self.smvApp.createDF("k:String;v:Integer", "a,;b,31")
class HiveTableWithVersion(SmvHiveTable):
def requiresDS(self):
return []
def tableName(self):
return "HiveTableWithVersion"
def version(self):
return "1.1"
class CsvFileWithRun(SmvCsvFile):
def requiresDS(self):
return []
def path(self):
return "foo"
def run(self, df):
return df.select("bar")
class CsvFileWithAttr(SmvCsvFile):
def path(self):
return "foo"
def userSchema(self):
return "@quote-char=';eman:String;di:integer"
class Child(Parent):
pass
class UsesConfigValue(SmvModule):
def requiresDS(self):
return[]
def run(self, i):
pass
def requiresConfig(self):
return ["keyChanges"]
class DoesntConfigValue(SmvModule):
def requiresDS(self):
return []
def run(self, i):
pass
def requiresConfig(self):
return ["keyDoesntChange"]
|
indashnet/InDashNet.Open.UN2000
|
android/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
|
Python
|
apache-2.0
| 20,425
| 0.003329
|
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""
import datetime
import json
import logging
import random
import sys
import time
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
_log = logging.getLogger(__name__)
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
def __init__(self, port, options, printer):
"""Initialize test runner data structures.
Args:
port: an object implementing port-specific
options: a dictionary of command line options
printer: a Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
self._http_server_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._finder = LayoutTestFinder(self._port, self._options)
self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def _collect_tests(self, args):
return self._finder.find_tests(self._options, args)
def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)
def _is_websocket_test(self, test):
return self.WEBSOCKET_SUBDIR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
    def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
tests_to_run = [test for test in test_names if test not in tests_to_skip]
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
if self._options.order == 'natural':
tests_to_run.sort(key=self._port.test_key)
elif self._options.order == 'random':
random.shuffle(tests_to_run)
elif self._options.order == 'random-seeded':
rnd = random.Random()
rnd.seed(4) # http://xkcd.com/221/
rnd.shuffle(tests_to_run)
tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
tests_to_skip.update(tests_in_other_chunks)
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
return TestInput(test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file),
should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
def _test_requires_lock(self, test_file):
"""Return True if the test needs to be locked when
running multiple copies of NRWTs. Perf tests are locked
because heavy load caused by running other tests in parallel
might cause some of them to timeout."""
return self._is_http_test(test_file) or self._is_perf_test(test_file)
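    # For instance (test names are illustrative): 'http/tests/misc/foo.html'
    # and 'perf/object-count.html' would require the lock, while
    # 'fast/css/foo.html' would not.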
def _test_is_expected_missing(self, test_file):
expectations = self._expectations.model().get_expectations(test_file)
return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations
def _test_is_slow(self, test_file):
return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)
def needs_servers(self, test_names):
return any(self._test_requires_lock(test_name) for test_name in test_names)
def _set_up_run(self, test_names):
self._printer.write_update("Checking build ...")
if not self._port.check_build(self.needs_servers(test_names)):
_log.error("Build check failed")
return False
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
if self._options.pixel_tests:
self._printer.write_update("Starting pixel test helper ...")
self._port.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update("Checking system dependencies ...")
if not self._port.check_sys_deps(self.needs_servers(test_names)):
self._port.stop_helper()
return False
if self._options.clobber_old_results:
self._clobber_old_results()
# Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(self._results_directory)
self._port.setup_test_run()
return True
def run(self, args):
"""Run the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update("Collecting tests ...")
try:
|
LockScreen/Backend
|
venv/lib/python2.7/site-packages/botocore/credentials.py
|
Python
|
mit
| 22,856
| 0.000306
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import functools
import logging
import os
from botocore.compat import six
from six.moves import configparser
from dateutil.parser import parse
from dateutil.tz import tzlocal
import botocore.config
import botocore.compat
from botocore.compat import total_seconds
from botocore.exceptions import UnknownCredentialError
from botocore.exceptions import PartialCredentialsError
from botocore.exceptions import ConfigNotFound
from botocore.utils import InstanceMetadataFetcher, parse_key_val_file
logger = logging.getLogger(__name__)
def create_credential_resolver(session):
"""Create a default credential resolver.
This creates a pre-configured credential resolver
that includes the default lookup chain for
credentials.
"""
profile_name = session.get_config_variable('profile') or 'default'
credential_file = session.get_config_variable('credentials_file')
config_file = session.get_config_variable('config_file')
metadata_timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
env_provider = EnvProvider()
providers = [
env_provider,
SharedCredentialProvider(
creds_filename=credential_file,
profile_name=profile_name
),
# The new config file has precedence over the legacy
# config file.
ConfigProvider(config_filename=config_file, profile_name=profile_name),
OriginalEC2Provider(),
BotoProvider(),
InstanceMetadataProvider(
iam_role_fetcher=InstanceMetadataFetcher(
timeout=metadata_timeout,
num_attempts=num_attempts)
)
]
explicit_profile = session.get_config_variable('profile',
methods=('instance',))
if explicit_profile is not None:
# An explicitly provided profile will negate an EnvProvider.
# We will defer to providers that understand the "profile"
# concept to retrieve credentials.
# The one edge case if is all three values are provided via
# env vars:
# export AWS_ACCESS_KEY_ID=foo
# export AWS_SECRET_ACCESS_KEY=bar
# export AWS_PROFILE=baz
# Then, just like our client() calls, the explicit credentials
# will take precedence.
#
# This precedence is enforced by leaving the EnvProvider in the chain.
# This means that the only way a "profile" would win is if the
# EnvProvider does not return credentials, which is what we want
# in this scenario.
providers.remove(env_provider)
else:
logger.debug('Skipping environment variable credential check'
' because profile name was explicitly set.')
resolver = CredentialResolver(providers=providers)
return resolver
def get_credentials(session):
resolver = create_credential_resolver(session)
return resolver.load_credentials()
def _local_now():
return datetime.datetime.now(tzlocal())
class Credentials(object):
"""
Holds the credentials needed to authenticate requests.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
def __init__(self, access_key, secret_key, token=None,
method=None):
self.access_key = access_key
self.secret_key = secret_key
self.token = token
if method is None:
method = 'explicit'
self.method = method
self._normalize()
def _normalize(self):
# Keys would sometimes (accidentally) contain non-ascii characters.
# It would cause a confusing UnicodeDecodeError in Python 2.
# We explicitly convert them into unicode to avoid such error.
#
# Eventually the service will decide whether to accept the credential.
# This also complies with the behavior in Python 3.
self.access_key = botocore.compat.ensure_unicode(self.access_key)
self.secret_key = botocore.compat.ensure_unicode(self.secret_key)
class RefreshableCredentials(Credentials):
"""
Holds the credentials needed to authenticate requests. In addition, it
knows how to refresh itself.
:ivar refresh_timeout: How long a given set of credentials are valid for.
Useful for credentials fetched over the network.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
:ivar session: The ``Session`` the credentials were created for. Useful for
subclasses.
"""
refresh_timeout = 15 * 60
def __init__(self, access_key, secret_key, token,
expiry_time, refresh_using, method,
time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = access_key
self._secret_key = secret_key
self._token = token
self._expiry_time = expiry_time
self._time_fetcher = time_fetcher
self.method = method
self._normalize()
def _normalize(self):
self._access_key = botocore.compat.ensure_unicode(self._access_key)
self._secret_key = botocore.compat.ensure_unicode(self._secret_key)
@classmethod
def create_from_metadata(cls, metadata, refresh_using, method):
instance = cls(
access_key=metadata['access_key'],
secret_key=metadata['secret_key'],
token=metadata['token'],
expiry_time=cls._expiry_datetime(metadata['expiry_time']),
method=method,
refresh_using=refresh_using
)
return instance
@property
def access_key(self):
self._refresh()
return self._access_key
@access_key.setter
def access_key(self, value):
self._access_key = value
@property
def secret_key(self):
self._refresh()
return self._secret_key
@secret_key.setter
def secret_key(self, value):
self._secret_key = value
@property
def token(self):
self._refresh()
return self._token
@token.setter
def token(self, value):
self._token = value
def _seconds_remaining(self):
delta = self._expiry_time - self._time_fetcher()
return total_seconds(delta)
def refresh_needed(self):
if self._expiry_time is None:
# No expiration, so assume we don't need to refresh.
return False
        # The credentials should be refreshed if they're going to expire
        # within refresh_timeout seconds (15 minutes here).
if self._seconds_remaining() >= self.refresh_timeout:
# There's enough time left. Don't refresh.
return False
# Assume the worst & refresh.
logger.debug("Credentials need to be refreshed.")
return True
def _refresh(self):
if not self.refresh_needed():
return
metadata = self._refresh_using()
self._set_from_data(metadata)
@staticmethod
def _expiry_datetime(time_str):
return parse(time_str)
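    # Hedged usage sketch -- the fetch() helper is hypothetical and stands in
    # for whatever callable supplies fresh credential metadata:
    #
    #   def fetch():
    #       return {'access_key': 'AK...', 'secret_key': 'SK...', 'token': 'T',
    #               'expiry_time': '2015-01-01T12:00:00Z'}
    #   creds = RefreshableCredentials.create_from_metadata(fetch(), fetch, 'sts')
    #   creds.access_key  # property access triggers _refresh() near expiry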
def _set_from_data(self, d
|
buchuki/opterator
|
examples/basic.py
|
Python
|
mit
| 1,198
| 0.000835
|
'''So you want a quick and dirty command line app without screwing around
with argparse or getopt, but also without a complicated if-else on the length
of sys.argv. You don't really need a comprehensive help file, because it's just
you running the script and knowing what options are available is enough.
How many boilerplate lines of code is it gonna take?'''
from opterator import opterate # 1
@opterate # 2
def main(filename, color='red', verbose=False): # 3
print(filename, color, verbose)
main() # 4
''' Answer: 4 lines.
You get a program that you can call on the command line like so:
$ python examples/basic.py this_file
this_file red False
or so:
$ python examples/basic.py this_file --color=blue
this_file blue False
or even so:
$ python examples/basic.py --color=purple another_file --verbose
another_file purple True
And you get a not too useless help description:
$ python examples/basic.py -h
Usage: basic.py [options] filename
Options:
-h, --help show this help message and exit
-c COLOR, --color=COLOR
-v, --verbose
'''
|
ergoithz/browsepy
|
browsepy/manager.py
|
Python
|
mit
| 24,389
| 0
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
import sys
import argparse
import warnings
import collections
from flask import current_app
from werkzeug.utils import cached_property
from . import mimetype
from . import compat
from .compat import deprecated, usedoc
def defaultsnamedtuple(name, fields, defaults=None):
'''
Generate namedtuple with default values.
:param name: name
:param fields: iterable with field names
:param defaults: iterable or mapping with field defaults
:returns: defaultdict with given fields and given defaults
:rtype: collections.defaultdict
'''
nt = collections.namedtuple(name, fields)
nt.__new__.__defaults__ = (None,) * len(nt._fields)
if isinstance(defaults, collections.Mapping):
nt.__new__.__defaults__ = tuple(nt(**defaults))
elif defaults:
nt.__new__.__defaults__ = tuple(nt(*defaults))
return nt
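# Example sketch ('Point' and its fields are illustrative, not browsepy API):
#   Point = defaultsnamedtuple('Point', ('x', 'y', 'z'), {'z': 0})
#   Point(1, 2)       -> Point(x=1, y=2, z=0)
#   Point(1, 2, z=5)  -> Point(x=1, y=2, z=5)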
class PluginNotFoundError(ImportError):
pass
class WidgetException(Exception):
pass
class WidgetParameterException(WidgetException):
pass
class InvalidArgumentError(ValueError):
pass
class PluginManagerBase(object):
'''
Base plugin manager for plugin module loading and Flask extension logic.
'''
@property
def namespaces(self):
'''
List of plugin namespaces taken from app config.
'''
return self.app.config['plugin_namespaces'] if self.app else []
def __init__(self, app=None):
if app is None:
self.clear()
else:
self.init_app(app)
def init_app(self, app):
'''
Initialize this Flask extension for given app.
'''
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['plugin_manager'] = self
self.reload()
def reload(self):
'''
Clear plugin manager state and reload plugins.
This method will make use of :meth:`clear` and :meth:`load_plugin`,
so all internal state will be cleared, and all plugins defined in
:data:`self.app.config['plugin_modules']` will be loaded.
'''
self.clear()
for plugin in self.app.config.get('plugin_modules', ()):
self.load_plugin(plugin)
def clear(self):
'''
Clear plugin manager state.
'''
pass
def import_plugin(self, plugin):
'''
Import plugin by given name, looking at :attr:`namespaces`.
:param plugin: plugin module name
:type plugin: str
:raises PluginNotFoundError: if not found on any namespace
'''
names = [
'%s%s%s' % (namespace, '' if namespace[-1] == '_' else '.', plugin)
if namespace else
plugin
for namespace in self.namespaces
]
for name in names:
if name in sys.modules:
return sys.modules[name]
for name in names:
try:
__import__(name)
return sys.modules[name]
except (ImportError, KeyError):
pass
raise PluginNotFoundError(
'No plugin module %r found, tried %r' % (plugin, names),
plugin, names)
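    # e.g. with namespaces ['browsepy.plugin', ''] (illustrative config
    # values), import_plugin('player') would try 'browsepy.plugin.player'
    # first and fall back to plain 'player' before raising PluginNotFoundError.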
def load_plugin(self, plugin):
'''
Import plugin (see :meth:`import_plugin`) and load related data.
:param plugin: plugin module name
:type plugin: str
:raises PluginNotFoundError: if not found on any namespace
'''
return self.import_plugin(plugin)
class RegistrablePluginManager(PluginManagerBase):
'''
Base plugin manager for plugin registration via :func:`register_plugin`
functions at plugin module level.
'''
def load_plugin(self, plugin):
'''
Import plugin (see :meth:`import_plugin`) and load related data.
If available, plugin's module-level :func:`register_plugin` function
will be called with current plugin manager instance as first argument.
:param plugin: plugin module name
:type plugin: str
:raises PluginNotFoundError: if not found on any namespace
'''
module = super(RegistrablePluginManager, self).load_plugin(plugin)
if hasattr(module, 'register_plugin'):
module.register_plugin(self)
return module
class BlueprintPluginManager(RegistrablePluginManager):
'''
Manager for blueprint registration via :meth:`register_plugin` calls.
Note: blueprints are not removed on `clear` nor reloaded on `reload`
as flask does not allow it.
'''
def __init__(self, app=None):
self._blueprint_known = set()
super(BlueprintPluginManager, self).__init__(app=app)
def register_blueprint(self, blueprint):
'''
        Register given blueprint on current app.
This method is provided for using inside plugin's module-level
:func:`register_plugin` functions.
:param blueprint: blueprint object with plugin endpoints
:type blueprint: flask.Blueprint
'''
if blueprint not in self._blueprint_known:
self.app.register_blueprint(blueprint)
self._blueprint_known.add(blueprint)
class WidgetPluginManager(RegistrablePluginManager):
'''
Plugin manager for widget registration.
This class provides a dictionary of widget types at its
:attr:`widget_types` attribute. They can be referenced by their keys on
both :meth:`create_widget` and :meth:`register_widget` methods' `type`
parameter, or instantiated directly and passed to :meth:`register_widget`
via `widget` parameter.
'''
widget_types = {
'base': defaultsnamedtuple(
'Widget',
('place', 'type')),
'link': defaultsnamedtuple(
'Link',
('place', 'type', 'css', 'icon', 'text', 'endpoint', 'href'),
{
'type': 'link',
'text': lambda f: f.name,
'icon': lambda f: f.category
}),
'button': defaultsnamedtuple(
'Button',
('place', 'type', 'css', 'text', 'endpoint', 'href'),
{'type': 'button'}),
'upload': defaultsnamedtuple(
'Upload',
('place', 'type', 'css', 'text', 'endpoint', 'action'),
{'type': 'upload'}),
'stylesheet': defaultsnamedtuple(
'Stylesheet',
('place', 'type', 'endpoint', 'filename', 'href'),
{'type': 'stylesheet'}),
        'script': defaultsnamedtuple(
            'Script',
            ('place', 'type', 'endpoint', 'filename', 'src'),
{'type': 'script'}),
'html': defaultsnamedtuple(
'Html',
('place', 'type', 'html'),
{'type': 'html'}),
}
def clear(self):
'''
Clear plugin manager state.
Registered widgets will be disposed after calling this method.
'''
self._widgets = []
super(WidgetPluginManager, self).clear()
def get_widgets(self, file=None, place=None):
'''
List registered widgets, optionally matching given criteria.
:param file: optional file object will be passed to widgets' filter
functions.
:type file: browsepy.file.Node or None
:param place: optional template place hint.
:type place: str
:returns: list of widget instances
:rtype: list of objects
'''
return list(self.iter_widgets(file, place))
@classmethod
def _resolve_widget(cls, file, widget):
'''
Resolve widget callable properties into static ones.
:param file: file will be used to resolve callable properties.
:type file: browsepy.file.Node
:param widget: widget instance optionally with callable properties
:type widget: object
:returns: a new widget instance of the same type as widget parameter
:rtype: object
'''
return widget.__class__(*[
value(file) if callable(value) else value
for value in widget
|
vedgar/ip
|
Chomsky/util.py
|
Python
|
unlicense
| 14,523
| 0.002998
|
import random, itertools, operator, types, pprint, contextlib, collections
import textwrap, string, pdb, copy, abc, functools
memoiziraj = functools.lru_cache(maxsize=None)
def djeljiv(m, n):
"""Je li m djeljiv s n?"""
return not m % n
def ispiši(automat):
"""Relativno uredan ispis (konačnog ili potisnog) automata."""
pprint.pprint(automat.komponente)
def Kartezijev_produkt(*skupovi):
"""Skup uređenih n-torki."""
return set(itertools.product(*skupovi))
def funkcija(f, domena, kodomena):
"""Je li f:domena->kodomena?"""
return f.keys() == domena and set(f.values()) <= kodomena
class fset(set):
"""Ponaša se kao frozenset, ispisuje se kao set."""
def __repr__(self):
return repr(set(self)) if self else '∅'
def __or__(self, other):
return fset(set(self) | set(other))
def __and__(self, other):
return fset(set(self) & set(other))
def __sub__(self, other):
return fset(set(self) - set(other))
def __xor__(self, other):
return fset(set(self) ^ set(other))
__ror__, __rand__, __rsub__, __rxor__ = __or__, __and__, __sub__, __xor__
def __hash__(self):
return hash(frozenset(self))
def __iand__(self, other):
return NotImplemented
def __ior__(self, other):
return NotImplemented
def __isub__(self, other):
return NotImplemented
def __ixor__(self, other):
return NotImplemented
def add(self, value):
raise TypeError('fset is immutable')
def clear(self):
raise TypeError('fset is immutable')
def difference_update(self, other):
raise TypeError('fset is immutable')
def intersection_update(self, other):
raise TypeError('fset is immutable')
def discard(self, value):
raise TypeError('fset is immutable')
def pop(self):
raise TypeError('fset is immutable')
def remove(self, value):
raise TypeError('fset is immutable')
def symmetric_difference_update(self, other):
raise TypeError('fset is immutable')
def update(self, other):
raise TypeError('fset is immutable')
def difference(self, other):
return self - other
def intersection(self, other):
return self & other
def symmetric_difference(self, other):
return self ^ other
def union(self, other):
return self | other
def copy(self):
return self
def __dir__(self):
return dir(frozenset)
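# A quick sketch of fset semantics (values are illustrative):
#   s = fset({1, 2})
#   s | {3}    -> a new fset {1, 2, 3}; s itself is unchanged
#   {s: 'ok'}  -> fine, since fset hashes like frozenset
#   s.add(3)   -> TypeError: fset is immutable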
def partitivni_skup(skup):
"""Skup svih podskupova zadanog skupa."""
return {fset(itertools.compress(skup, χ))
for χ in itertools.product({False, True}, repeat=len(skup))}
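# e.g. partitivni_skup({1, 2}) == {fset(), fset({1}), fset({2}), fset({1, 2})}
# -- one subset per boolean mask, 2**len(skup) subsets in total.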
def relacija(R, *skupovi):
"""Je li R relacija među zadanim skupovima?"""
return R <= Kartezijev_produkt(*skupovi)
def sažmi(vrijednost):
    """Collapse 1-tuples into their elements. Other n-tuples are untouched."""
with contextlib.suppress(TypeError, ValueError):
komponenta, = vrijednost
return komponenta
return vrijednost
def naniži(vrijednost):
"""Pretvaranje vrijednosti koja nije n-torka u 1-torku."""
return vrijednost if isinstance(vrijednost, tuple) else (vrijednost,)
def funkcija_iz_relacije(relacija, *domene):
"""Pretvara R⊆A×B×C×D×E (uz domene A, B) u f:A×B→℘(C×D×E)."""
m = len(domene)
funkcija = {sažmi(x): set() for x in Kartezijev_produkt(*domene)}
for n_torka in relacija:
assert len(n_torka) > m
for x_i, domena_i in zip(n_torka, domene):
assert x_i in domena_i
x, y = n_torka[:m], n_torka[m:]
if len(x) == 1: x, = x
if len(y) == 1: y, = y
funkcija[sažmi(x)].add(sažmi(y))
return funkcija
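# e.g. (values illustrative) for R = {(1, 'a'), (1, 'b'), (2, 'a')} over the
# domain {1, 2}: funkcija_iz_relacije(R, {1, 2}) == {1: {'a', 'b'}, 2: {'a'}}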
def relacija_iz_funkcije(funkcija):
"""Pretvara f:A×B→℘(C×D×E) u R⊆A×B×C×D×E."""
return {naniži(x) + naniži(y) for x, yi in funkcija.items() for y in yi}
def unija_familije(familija):
"""Unija familije skupova."""
return fset(x for skup in familija for x in skup)
def disjunktna_unija(*skupovi):
"""Unija skupova, osiguravajući da su u parovima disjunktni."""
for skup1, skup2 in itertools.combinations(skupovi, 2):
assert skup1.isdisjoint(skup2)
return set().union(*skupovi)
def ε_proširenje(Σ):
"""Σ∪{ε}"""
return disjunktna_unija(Σ, {ε})
def primijeni(pravilo, riječ, mjesto):
"""Primjenjuje gramatičko pravilo na zadanom mjestu (indeksu) u riječi."""
varijabla, *zamjena = pravilo
assert riječ[mjesto] == varijabla
rezultat = list(riječ[:mjesto]) + zamjena + list(riječ[mjesto+1:])
return ''.join(rezultat) if isinstance(riječ, str) else rezultat
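# e.g. primijeni('ABC', 'xAy', 1) == 'xBCy' -- the rule 'ABC' rewrites the
# variable 'A' at index 1 into 'BC'.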
class Kontraprimjer(Exception):
"""Jezik se ne slaže sa zadanom specifikacijom."""
def __init__(self, test, spec):
        self.args = "The language does {}contain {!r}".format('not ' * bool(spec), test),
class PrazanString(str):
"""Klasa koja određuje ponašanje objekta ε."""
def __add__(self, other):
return other
def __mul__(self, n):
return self
def __len__(self):
return 0
def __repr__(self):
return 'ε'
__radd__, __rmul__, __str__ = __add__, __mul__, __repr__
ε = PrazanString()
def parsiraj_tablicu_KA(tablica):
"""Parsiranje tabličnog zapisa konačnog automata (Sipser page 36).
Prvo stanje je početno, završna su označena znakom # na kraju reda."""
prva, *ostale = tablica.strip().splitlines()
znakovi = prva.split()
assert all(len(znak) == 1 for znak in znakovi)
abeceda = set(znakovi)
stanja, završna = set(), set()
prijelaz, početno = {}, None
for linija in ostale:
stanje, *dolazna = linija.split()
if početno is None: početno = stanje
extra = len(dolazna) - len(znakovi)
assert extra in {0, 1}
if extra == 1:
assert dolazna.pop() == '#'
završna.add(stanje)
for znak, dolazno in zip(znakovi, dolazna):
prijelaz[stanje, znak] = dolazno
stanja.add(stanje)
return stanja, abeceda, prijelaz, početno, završna
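# A minimal illustrative table (states and symbols are made up):
#   parsiraj_tablicu_KA('''
#        0  1
#   q0   q0 q1
#   q1   q1 q0 #
#   ''')
# yields states {'q0', 'q1'}, alphabet {'0', '1'}, start state 'q0',
# accepting states {'q1'} and the corresponding transition dict.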
def parsiraj_tablicu_NKA(tablica):
"""Parsiranje tabličnog zapisa nedeterminističkog KA (Sipser page 54).
Prvo stanje je početno, završna su označena znakom # na kraju reda.
ε-prijelazi su nakon svih znak-prijelaza (stupac čije zaglavlje nema znaka).
Izostanak prijelaza označava se znakom / na odgovarajućem mjestu.
Višestruki prijelazi za isto stanje i znak razdvojeni su znakom /."""
prva, *ostale = tablica.strip().splitlines()
znakovi = prva.split()
assert all(len(znak) == 1 for znak in znakovi)
abeceda = set(znakovi)
stanja, završna = set(), set()
prijelaz, početno = set(), None
for linija in ostale:
stanje, *dolazna = linija.split()
if početno is None: početno = stanje
extra = len(dolazna) - len(znakovi)
assert extra >= 0
if extra > 0 and dolazna[~0] == '#':
del dolazna[~0]
završna.add(stanje)
for znak, dolazno in zip(znakovi, dolazna):
for dolazno1 in filter(None, dolazno.split('/')):
prijelaz.add((stanje, znak, dolazno1))
for dolazno in dolazna[len(znakovi):]:
for dolazno2 in dolazno.split('/'):
prijelaz.add((stanje, ε, dolazno2))
stanja.add(stanje)
return stanja, abeceda, prijelaz, početno, završna
def parsiraj_tablicu_PA(tablica):
"""Parsiranje tabličnog zapisa (relacije prijelaza) potisnog automata.
Svaki redak ima polazno stanje, čitani znak, pop znak, dolazno, push znak.
Prvo polazno stanje je početno, završna su označena znakom # na kraju reda.
ε se označava znakom /. Završno stanje iz kojeg ne izlazi strelica je #."""
stanja, abeceda, abeceda_stoga, prijelaz = set(), set(), set(), set()
početno, završna = None, set()
def dodaj(znak, skup):
if znak in {'/', 'ε'}:
return ε
skup.add(znak)
return znak
for linija in tablica.strip().splitlines():
trenutno_završno = False
ćelije = linija.split()
if len(ćelije) == 6:
assert ćelije.pop() == '#'
trenutno_završno = True
|
xiaoyongaa/ALL
|
网络编程第四周/socket_client.py
|
Python
|
apache-2.0
| 717
| 0.038052
|
import socket
ip_port=("127.0.0.1",9999)
# buy a phone
s=socket.socket()
# dial directly
s.connect(ip_port)
while True:
    # send a message
send_data=input("please ").strip()
if len(send_data)==0:
continue
send_data=bytes(send_data,encoding="utf8")
    # the client sends; s plays the same role as conn on the server side
s.send(send_data)
print("----------------------------1")
if str(send_data,encoding="utf-8")=="exit" or str(send_data,encoding="utf-8")=="EXIT":break
    # receive the reply
    recv_data=s.recv(1024)
    print("----------------------------2")
recv_data=str(recv_data,encoding="utf-8")
# if recv_data=="exit" or recv_data=="EXIT":
# break
print(recv_data)
# hang up
s.close()
|
kived/python-for-android
|
pythonforandroid/bootstraps/pygame/build/build.py
|
Python
|
mit
| 18,072
| 0.000664
|
#!/usr/bin/env python2.7
from os.path import dirname, join, isfile, realpath, relpath, split, exists
from zipfile import ZipFile
import sys
sys.path.insert(0, 'buildlib/jinja2.egg')
sys.path.insert(0, 'buildlib')
from fnmatch import fnmatch
import tarfile
import os
import shutil
import subprocess
import time
import jinja2
# The extension of the android and ant commands.
if os.name == 'nt':
ANDROID = 'android.bat'
ANT = 'ant.bat'
else:
ANDROID = 'android'
ANT = 'ant'
# if ANDROIDSDK is on path, use android from this path
ANDROIDSDK = os.environ.get('ANDROIDSDK')
if ANDROIDSDK:
ANDROID = os.path.join(ANDROIDSDK, 'tools', ANDROID)
curdir = dirname(__file__)
# Try to find a host version of Python that matches our ARM version.
PYTHON = join(curdir, 'python-install', 'bin', 'python.host')
BLACKLIST_PATTERNS = [
    # code versioning
'^*.hg/*',
'^*.git/*',
'^*.bzr/*',
'^*.svn/*',
# pyc/py
'*.pyc',
'*.py',
# temp files
'~',
'*.bak',
'*.swp',
]
WHITELIST_PATTERNS = []
python_files = []
# Used by render.
environment = jinja2.Environment(loader=jinja2.FileSystemLoader(
join(curdir, 'templates')))
def render(template, dest, **kwargs):
'''Using jinja2, render `template` to the filename `dest`, supplying the
keyword arguments as template parameters.
'''
template = environment.get_template(template)
text = template.render(**kwargs)
f = open(dest, 'wb')
f.write(text.encode('utf-8'))
f.close()
def compile_dir(dfn):
'''
Compile *.py in directory `dfn` to *.pyo
'''
# -OO = strip docstrings
subprocess.call([PYTHON, '-OO', '-m', 'compileall', '-f', dfn])
def is_whitelist(name):
return match_filename(WHITELIST_PATTERNS, name)
def is_blacklist(name):
if is_whitelist(name):
return False
    return match_filename(BLACKLIST_PATTERNS, name)
def match_filename(pattern_list, name):
for pattern in pattern_list:
if pattern.startswith('^'):
pattern = pattern[1:]
else:
pattern = '*/' + pattern
if fnmatch(name, pattern):
return True
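# e.g. (paths are illustrative): '^*.git/*' is anchored to the start of the
# name, so it matches '.git/config'; '*.pyc' is widened to '*/*.pyc' and
# matches at any depth below a directory, e.g. 'app/main.pyc'.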
def listfiles(d):
basedir = d
subdirlist = []
for item in os.listdir(d):
fn = join(d, item)
        if isfile(fn):
yield fn
else:
subdirlist.append(os.path.join(basedir, item))
for subdir in subdirlist:
for fn in listfiles(subdir):
yield fn
def make_pythonzip():
'''
Search for all the python related files, and construct the pythonXX.zip
According to
# http://randomsplat.com/id5-cross-compiling-python-for-embedded-linux.html
site-packages, config and lib-dynload will be not included.
'''
global python_files
d = realpath(join('private', 'lib', 'python2.7'))
# selector function
def select(fn):
if is_blacklist(fn):
return False
fn = realpath(fn)
assert(fn.startswith(d))
fn = fn[len(d):]
if (fn.startswith('/site-packages/') or
fn.startswith('/config/') or
fn.startswith('/lib-dynload/') or
fn.startswith('/libpymodules.so')):
return False
return fn
# get a list of all python file
python_files = [x for x in listfiles(d) if select(x)]
# create the final zipfile
zfn = join('private', 'lib', 'python27.zip')
zf = ZipFile(zfn, 'w')
# put all the python files in it
for fn in python_files:
afn = fn[len(d):]
zf.write(fn, afn)
zf.close()
def make_tar(tfn, source_dirs, ignore_path=[]):
'''
    Make a tar.gz archive `tfn` from the contents of source_dirs.
'''
# selector function
def select(fn):
rfn = realpath(fn)
for p in ignore_path:
if p.endswith('/'):
p = p[:-1]
if rfn.startswith(p):
return False
if rfn in python_files:
return False
return not is_blacklist(fn)
# get the files and relpath file of all the directory we asked for
files = []
for sd in source_dirs:
sd = realpath(sd)
compile_dir(sd)
files += [(x, relpath(realpath(x), sd)) for x in listfiles(sd)
if select(x)]
    # create a tar.gz of those files
tf = tarfile.open(tfn, 'w:gz', format=tarfile.USTAR_FORMAT)
dirs = []
for fn, afn in files:
# print('%s: %s' % (tfn, fn))
dn = dirname(afn)
if dn not in dirs:
# create every dirs first if not exist yet
d = ''
for component in split(dn):
d = join(d, component)
if d.startswith('/'):
d = d[1:]
if d == '' or d in dirs:
continue
dirs.append(d)
tinfo = tarfile.TarInfo(d)
tinfo.type = tarfile.DIRTYPE
tf.addfile(tinfo)
# put the file
tf.add(fn, afn)
tf.close()
def make_package(args):
version_code = 0
manifest_extra = ['<uses-feature android:glEsVersion="0x00020000" />']
for filename in args.manifest_extra:
with open(filename, "r") as fd:
content = fd.read()
manifest_extra.append(content)
manifest_extra = '\n'.join(manifest_extra)
url_scheme = 'kivy'
default_icon = 'templates/kivy-icon.png'
default_presplash = 'templates/kivy-presplash.jpg'
default_ouya_icon = 'templates/kivy-ouya-icon.png'
# Figure out the version code, if necessary.
if not args.numeric_version:
for i in args.version.split('.'):
version_code *= 100
version_code += int(i)
args.numeric_version = str(version_code)
# args.name = args.name.decode('utf-8')
# if args.icon_name:
# args.icon_name = args.icon_name.decode('utf-8')
versioned_name = (args.name.replace(' ', '').replace('\'', '') +
'-' + args.version)
# Android SDK rev14 needs two ant execs (ex: debug installd) and
# new build.xml
build_tpl = 'build.xml'
if not args.icon_name:
args.icon_name = args.name
# Annoying fixups.
args.name = args.name.replace('\'', '\\\'')
args.icon_name = args.icon_name.replace('\'', '\\\'')
# Figure out versions of the private and public data.
private_version = str(time.time())
if args.dir:
public_version = private_version
else:
public_version = None
if args.intent_filters:
intent_filters = open(args.intent_filters).read()
else:
intent_filters = ''
# Figure out if application has service part
service = False
directory = args.dir if public_version else args.private
if not (exists(join(realpath(directory), 'main.py')) or
exists(join(realpath(directory), 'main.pyo'))):
print('''BUILD FAILURE: No main.py(o) found in your app directory. This
file must exist to act as the entry point for your app. If your app is
started by a file with a different name, rename it to main.py or add a
main.py that loads it.''')
exit(1)
if directory:
service_main = join(realpath(directory), 'service', 'main.py')
if os.path.exists(service_main) or os.path.exists(service_main + 'o'):
service = True
# Check if OUYA support is enabled
if args.ouya_category:
args.ouya_category = args.ouya_category.upper()
if args.ouya_category not in ('GAME', 'APP'):
print('Invalid --ouya-category argument. should be one of'
'GAME or APP')
sys.exit(-1)
# Render the various templates into control files.
render(
'AndroidManifest.tmpl.xml',
'AndroidManifest.xml',
args=args,
service=service,
url_scheme=url_scheme,
intent_filters=intent_filters,
manifest_extra=manifest_extra,
)
render(
'Configuration.tmpl.java',
'src/org/renpy/android/Configuration.java',
args=args)
render(
build_tpl,
'build.xml',
args=args,
versioned_name=version
|
jimarnold/gomatic
|
gomatic/__init__.py
|
Python
|
mit
| 484
| 0.002066
|
from gomatic.go_cd_configurator import HostRestClient, GoCdConfigurator
from gomatic.gocd.agents import Agent
from gomatic.gocd.materials import GitMaterial, PipelineMaterial
from gomatic.gocd.pipelines import Tab, Job, Pipeline, PipelineGroup
from gomatic.gocd.tasks import FetchArtifactTask, ExecTask, RakeTask
from gomatic.gocd.artifacts import FetchArtifactFile, FetchArtifactDir, BuildArtifact, TestArtifact, ArtifactFor
from gomatic.fake import FakeHostRestClient, empty_config
|
|
digwanderlust/pants
|
tests/python/pants_test/backend/jvm/targets/test_jvm_binary.py
|
Python
|
apache-2.0
| 9,783
| 0.00828
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from textwrap import dedent
from pants.backend.jvm.register import build_file_aliases as register_jvm
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jvm_binary import (Duplicate, JarRules, JvmBinary, ManifestEntries,
Skip)
from pants.base.address import BuildFileAddress
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload_field import FingerprintedField
from pants.base.target import Target
from pants_test.base_test import BaseTest
class JarRulesTest(unittest.TestCase):
def test_jar_rule(self):
dup_rule = Duplicate('foo', Duplicate.REPLACE)
self.assertEquals('Duplicate(apply_pattern=foo, action=REPLACE)',
repr(dup_rule))
skip_rule = Skip('foo')
self.assertEquals('Skip(apply_pattern=foo)', repr(skip_rule))
def test_invalid_apply_pattern(self):
with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern is not a string'):
Skip(None)
with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern is not a string'):
Duplicate(None, Duplicate.SKIP)
with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern: \) is not a valid'):
Skip(r')')
with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern: \) is not a valid'):
Duplicate(r')', Duplicate.SKIP)
def test_bad_action(self):
with self.assertRaisesRegexp(ValueError, r'The supplied action must be one of'):
Duplicate('foo', None)
def test_duplicate_error(self):
with self.assertRaisesRegexp(Duplicate.Error, r'Duplicate entry encountered for path foo'):
raise Duplicate.Error('foo')
def test_default(self):
jar_rules = JarRules.default()
    self.assertEquals(4, len(jar_rules.rules))
for rule in jar_rules.rules:
self.assertTrue(rule.apply_pattern.pattern.startswith(r'^META-INF'))
def test_set_bad_default(self):
with self.assertRaisesRegexp(ValueError, r'The default rules must be a JarRules'):
JarRules.set_default(None)
class JvmBinaryTest(BaseTest):
@property
def alias_groups(self):
    return register_jvm()
def test_simple(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
basename='foo-base',
)
'''))
target = self.target('//:foo')
self.assertEquals('com.example.Foo', target.main)
    self.assertEquals('com.example.Foo', target.payload.main)
self.assertEquals('foo-base', target.basename)
self.assertEquals('foo-base', target.payload.basename)
self.assertEquals([], target.deploy_excludes)
self.assertEquals([], target.payload.deploy_excludes)
self.assertEquals(JarRules.default(), target.deploy_jar_rules)
self.assertEquals(JarRules.default(), target.payload.deploy_jar_rules)
    self.assertEquals({}, target.payload.manifest_entries.entries)
def test_default_base(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
)
'''))
target = self.target('//:foo')
self.assertEquals('foo', target.basename)
def test_deploy_jar_excludes(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
deploy_excludes=[exclude(org='example.com', name='foo-lib')],
)
'''))
target = self.target('//:foo')
self.assertEquals([Exclude(org='example.com', name='foo-lib')],
target.deploy_excludes)
def test_deploy_jar_rules(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
deploy_jar_rules=jar_rules([Duplicate('foo', Duplicate.SKIP)],
default_dup_action=Duplicate.FAIL)
)
'''))
target = self.target('//:foo')
jar_rules = target.deploy_jar_rules
self.assertEquals(1, len(jar_rules.rules))
self.assertEquals('foo', jar_rules.rules[0].apply_pattern.pattern)
self.assertEquals(repr(Duplicate.SKIP),
repr(jar_rules.rules[0].action)) # <object object at 0x...>
self.assertEquals(Duplicate.FAIL, jar_rules.default_dup_action)
def test_bad_source_declaration(self):
build_file = self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
source=['foo.py'],
)
'''))
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmBinary.*foo.*source must be a single'):
self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'foo'))
def test_bad_sources_declaration(self):
with self.assertRaisesRegexp(Target.IllegalArgument,
r'jvm_binary only supports a single "source" argument'):
self.make_target('foo:foo', target_type=JvmBinary, main='com.example.Foo', sources=['foo.py'])
def test_bad_main_declaration(self):
build_file = self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='bar',
main=['com.example.Bar'],
)
'''))
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmBinary.*bar.*main must be a fully'):
self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'bar'))
def test_bad_jar_rules(self):
build_file = self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
deploy_jar_rules='invalid',
)
'''))
with self.assertRaisesRegexp(TargetDefinitionException,
r'Invalid target JvmBinary.*foo.*'
r'deploy_jar_rules must be a JarRules specification. got str'):
self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'foo'))
def _assert_fingerprints_not_equal(self, fields):
for field in fields:
for other_field in fields:
if field == other_field:
continue
self.assertNotEquals(field.fingerprint(), other_field.fingerprint())
def test_jar_rules_field(self):
field1 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)]))
field1_same = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)]))
field2 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.CONCAT)]))
field3 = FingerprintedField(JarRules(rules=[Duplicate('bar', Duplicate.SKIP)]))
field4 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP),
Duplicate('bar', Duplicate.SKIP)]))
field5 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP), Skip('foo')]))
field6 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)],
default_dup_action=Duplicate.FAIL))
field6_same = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)],
default_dup_action=Duplicate.FAIL))
field7 = FingerprintedField(JarRules(rules=[Skip('foo')]))
field8 = FingerprintedField(JarRules(rules=[Skip('bar')]))
field8_same = FingerprintedField(JarRules(rules=[Skip('bar')]))
self.assertEquals(field1.fingerprint(), field1_same.fingerprint())
self.assertEquals(field6.fingerprint(), field6_same.fingerprint())
self.assertEquals(field8.fingerprint(), field8_same.fingerprint())
self._assert_fingerprints_not_equal([field1, field2, field3, field4, field5, field6, field7])
def test_manifest_entries(self):
self.add_to_build_file('BUILD', dedent('''
jvm_binary(name='foo',
main='com.example.Foo',
manifest_entries= {
'Foo-Field' : 'foo',
}
)
|
zoff/torrentz-deluge-plugin
|
torrentztrackersautoload/__init__.py
|
Python
|
gpl-2.0
| 1,603
| 0.000624
|
#
# __init__.py
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
from deluge.plugins.init import PluginInitBase
class CorePlugin(PluginInitBase):
def __init__(self, plugin_name):
from core import Core as _plugin_cls
self._plugin_cls = _plugin_cls
super(CorePlugin, self).__init__(plugin_name)
|
xuleiboy1234/autoTitle
|
tensorflow/tensorflow/python/estimator/model_fn.py
|
Python
|
mit
| 12,173
| 0.004354
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and methods related to model_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.estimator.export.export_output import ExportOutput
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import nest
class ModeKeys(object):
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training mode.
* `EVAL`: evaluation mode.
* `PREDICT`: inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
PREDICT = 'infer'
LOSS_METRIC_KEY = 'loss'
AVERAGE_LOSS_METRIC_KEY = 'average_loss'
class EstimatorSpec(
collections.namedtuple('EstimatorSpec', [
'predictions', 'loss', 'train_op', 'eval_metric_ops',
'export_outputs', 'training_chief_hooks', 'training_hooks',
'scaffold', 'evaluation_hooks'
])):
"""Ops and objects returned from a `model_fn` and passed to an `Estimator`.
`EstimatorSpec` fully defines the model to be run by an `Estimator`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metric_ops=None,
export_outputs=None,
training_chief_hooks=None,
training_hooks=None,
scaffold=None,
evaluation_hooks=None):
"""Creates a validated `EstimatorSpec` instance.
Depending on the value of `mode`, different arguments are required. Namely
* For `mode == ModeKeys.TRAIN`: required fields are `loss` and `train_op`.
    * For `mode == ModeKeys.EVAL`: required field is `loss`.
* For `mode == ModeKeys.PREDICT`: required fields are `predictions`.
    model_fn can populate all arguments independent of mode. In this case, some
arguments will be ignored by an `Estimator`. E.g. `train_op` will be
ignored in eval and infer modes. Example:
```python
def my_model_fn(mode, features, labels):
predictions = ...
loss = ...
train_op = ...
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
```
Alternatively, model_fn can just populate the arguments appropriate to the
given mode. Example:
```python
def my_model_fn(mode, features, labels):
if (mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL):
loss = ...
else:
loss = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = ...
else:
train_op = None
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = ...
else:
predictions = None
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
```
Args:
mode: A `ModeKeys`. Specifies if this is training, evaluation or
prediction.
predictions: Predictions `Tensor` or dict of `Tensor`.
loss: Training loss `Tensor`. Must be either scalar, or with shape `[1]`.
train_op: Op for the training step.
eval_metric_ops: Dict of metric results keyed by name. The values of the
dict are the results of calling a metric function, namely a
`(metric_tensor, update_op)` tuple.
export_outputs: Describes the output signatures to be exported to
`SavedModel` and used during serving.
A dict `{name: output}` where:
* name: An arbitrary name for this output.
* output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Single-headed models only need to specify one entry in this dictionary.
Multi-headed models should specify one entry for each head, one of
which must be named using
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.
training_chief_hooks: Iterable of `tf.train.SessionRunHook` objects to
run on the chief worker during training.
training_hooks: Iterable of `tf.train.SessionRunHook` objects to run
on all workers during training.
scaffold: A `tf.train.Scaffold` object that can be used to set
initialization, saver, and more to be used in training.
evaluation_hooks: Iterable of `tf.train.SessionRunHook` objects to
run during evaluation.
Returns:
A validated `EstimatorSpec` object.
Raises:
ValueError: If validation fails.
TypeError: If any of the arguments is not the expected type.
"""
# Validate train_op.
if train_op is None:
if mode == ModeKeys.TRAIN:
raise ValueError('Missing train_op.')
else:
_check_is_tensor_or_operation(train_op, 'train_op')
# Validate loss.
if loss is None:
if mode in (ModeKeys.TRAIN, ModeKeys.EVAL):
raise ValueError('Missing loss.')
else:
loss = _check_is_tensor(loss, 'loss')
loss_shape = loss.get_shape()
if loss_shape.num_elements() not in (None, 1):
raise ValueError('Loss must be scalar, given: {}'.format(loss))
if not loss_shape.is_compatible_with(tensor_shape.scalar()):
loss = array_ops.reshape(loss, [])
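    # Illustrative note (hypothetical values): a loss tensor of shape [1],
    # e.g. constant([0.5]), passes the element-count check above and is
    # reshaped to a scalar; a loss of shape [2] raises the ValueError.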
# Validate predictions.
if predictions is None:
if mode == ModeKeys.PREDICT:
raise ValueError('Missing predictions.')
predictions = {}
else:
if isinstance(predictions, dict):
predictions = {
k: _check_is_tensor(v, 'predictions[{}]'.format(k))
for k, v in six.iteritems(predictions)
}
else:
predictions = _check_is_tensor(predictions, 'predictions')
# Validate eval_metric_ops.
if eval_metric_ops is None:
eval_metric_ops = {}
else:
if not isinstance(eval_metric_ops, dict):
raise TypeError(
'eval_metric_ops must be a dict, given: {}'.format(eval_metric_ops))
for key, metric_value_and_update in six.iteritems(eval_metric_ops):
if (not isinstance(metric_value_and_update, tuple) or
len(metric_value_and_update) != 2):
raise TypeError(
'Values of eval_metric_ops must be (metric_value, update_op) '
'tuples, given: {} for key: {}'.format(
metric_value_and_update, key))
metric_value, metric_update = metric_value_and_update
for metric_value_member in nest.flatten(metric_value):
# Allow (possibly nested) tuples for metric values, but require that
# each of them be Tensors or Operations.
_check_is_tensor_or_operation(metric_value_member,
'eval_metric_ops[{}]'.format(key))
_check_is_tensor_or_operation(metric_update,
'eval_metric_ops[{}]'.format(key))
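    # Illustrative note (hypothetical example): a valid entry is the
    # (metric_tensor, update_op) pair returned by a tf.metrics function,
    # e.g. eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels, preds)}.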
# Validate export_outputs.
if export_outputs is not None:
if not isinstance(export_outputs, dict):
raise TypeError('export_outputs must be dict, given: {}'.format(
export_outputs))
for v in six.itervalues(export_outputs):
if not isinstance(v, ExportOutput):
|
camm/dsfinterp
|
setup.py
|
Python
|
mit
| 880
| 0.027273
|
'''
Created on Jan 15, 2014
@author: Jose Borreguero
'''
from setuptools import setup
setup(
name = 'dsfinterp',
packages = ['dsfinterp','dsfinterp/test' ],
version = '0.1',
description = 'Cubic Spline Interpolation of Dynamics Structure Factors',
  long_description = open('README.md').read(),
author = 'Jose Borreguero',
author_email = 'jose@borreguero.com',
url = 'https://github.com/camm-sns/dsfinterp',
download_url = 'http://pypi.python.org/pypi/dsfinterp',
  keywords = ['AMBER', 'mdend', 'energy', 'molecular dynamics'],
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Physics',
],
)
|
spatialcollective/watsan
|
watsan/views/settings_views.py
|
Python
|
mit
| 4,402
| 0.024989
|
import json
import uuid
from django.contrib.auth import logout
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from watsan.models import WatsanUserMeta, Organization, NewEmail
from watsan.models.forms import AddUserForm, EditUserNameForm, ChangeUserEmailForm
from django.core.mail import send_mail
from django.http import HttpResponse
@login_required(login_url='/watsan/login/?next=/watsan')
def settings(request):
meta = WatsanUserMeta.objects.get(user=request.user)
members = WatsanUserMeta.objects.filter(organization=meta.organization)
adduserform = AddUserForm()
if request.method == 'POST':
adduserform = AddUserForm(request.POST)
if adduserform.is_valid():
if adduserform.clean_email2():
user = adduserform.save(commit=False)
user.username = user.email
password = User.objects.make_random_password()
user.set_password(password)
user.save()
meta = WatsanUserMeta.objects.get(user=request.user)
new_meta = WatsanUserMeta(user=user, organization=meta.organization)
new_meta.save()
#email added user
#TODO: change register_url
message = render_to_string('watsan/email/welcome.html', {'password': password, 'invite_from': request.user, 'register_url':'http://localhost:8000/watsan/login'})
send_mail('Welcome!', message, 'kariuki@ishuah.com', [user.email])
adduserform = AddUserForm()
return render(request, 'watsan/dashboard/settings.html', { 'meta': meta, 'members': members, 'adduserform': adduserform, 'message': user.email+" added!" })
return render(request, 'watsan/dashboard/settings.html', { 'meta': meta, 'members': members, 'adduserform': adduserform })
else:
return render(request, 'watsan/dashboard/settings.html', { 'meta': meta, 'members': members, 'adduserform': adduserform })
else:
return render(request, 'watsan/dashboard/settings.html', { 'meta': meta, 'members': members, 'adduserform': adduserform })
@login_required(login_url='/watsan/login/?next=/watsan')
def change_user_name(request):
if request.method == 'POST':
new_name = EditUserNameForm(request.POST).save(commit=False)
request.user.first_name = new_name.first_name
request.user.last_name = new_name.last_name
request.user.save();
    return redirect('/watsan/settings')
else:
new_name_form = EditUserNameForm({'first_name': request.user.first_name, 'last_name': request.user.last_name })
return render(request, 'watsan/dashboard/change_user_name.html', { 'new_name_form': new_name_form })
@login_required(login_url='/watsan/login/?next=/watsan')
def change_user_email(request):
if request.method == 'POST':
new_email_form = ChangeUserEmailForm(request.POST)
if new_email_form.is_valid():
new_email = new_email_form.save(commit=False)
if User.objects.filter(email=new_email.email).exists():
new_email_form._errors['email'] = new_email_form.error_class([u"That email is already registered."])
return render(request, 'watsan/dashboard/change_user_email.html', {'new_email_form': new_email_form })
else:
new_email.user = request.user
new_email.hash_string = uuid.uuid1().hex[:9]
transfered = False
new_email.save()
message = render_to_string('watsan/email/change_email.html', { 'link': 'http://localhost:8000/watsan/settings/change_user_email_complete/'+new_email.hash_string })
send_mail('Changing your watsan email', message, 'kariuki@ishuah.com', [new_email.email])
return render(request, 'watsan/dashboard/change_user_email_done.html')
else:
return render(request, 'watsan/dashboard/change_user_email.html', {'new_email_form': new_email_form })
else:
new_email_form = ChangeUserEmailForm()
return render(request, 'watsan/dashboard/change_user_email.html', {'new_email_form': new_email_form })
def change_user_email_complete(request, hash_string):
try:
new_email = NewEmail.objects.get(hash_string=hash_string)
except:
return render(request, 'watsan/dashboard/change_user_email_error.html')
user = new_email.user
user.email = new_email.email
user.username = new_email.email
user.save()
new_email.transfered = True
new_email.hash_string = ''
new_email.save()
if request.user.is_authenticated():
logout(request)
return render(request, 'watsan/dashboard/change_user_email_complete.html')
|
webkom/holonet
|
holonet/settings/development.py
|
Python
|
mit
| 1,029
| 0.000972
|
from .base import BASE_DIR, INSTALLED_APPS, MIDDLEWARE_CLASSES, REST_FRAMEWORK
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
SECRET_KEY = 'secret'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'holonet',
'USER': 'holonet',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '',
}
}
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://127.0.0.1:6379/0',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
BROKER_URL = 'redis://127.0.0.1'
ELASTICSEARCH = {
'default': {
'hosts': [
'127.0.0.1:9200'
]
}
}
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] += ['rest_framework.renderers.BrowsableAPIRenderer']
INSTALLED_APPS += ('debug_toolbar', )
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware', )
INTERNAL_IPS = ('127.0.0.1', )
POSTFIX_TRANSPORT_MAPS_LOCATION = '{0}/../mta/shared/'.format(BASE_DIR)
|
mashedkeyboard/Headlights
|
runtests.py
|
Python
|
gpl-3.0
| 1,771
| 0.003953
|
# Headlights testing
# Designed to be run by Travis CI. Should work for humans too, we suppose. But humans? Bleh.
# Remember to set the HEADLIGHTS_TESTMODE and HEADLIGHTS_DPKEY env-vars before testing.
import tests.configuration, tests.printer, tests.server, tests.plugins
import main
try:
    # Run the configuration tests
tests.configuration.headlightsConfTest()
print("Headlights.cfg.sample configuration test passed")
tests.configuration.weatherConfTest()
print("Weather.cfg.sample configuration test passed")
tests.configuration.webConfTest()
print("Web.cfg configuration test passed")
    tests.configuration.headlightsSaveCfgTest()
print("Test.cfg configuration save test passed")
# Run printer-related tests with dummy printers
tests.printer.testSetFont()
print("Dummy printer font set test passed")
tests.printer.testPrintText()
print("Dummy printer text print test passed")
tests.printer.testPrintImage()
print("Dummy printer image print test passed")
tests.printer.testCutPaper()
print("Dummy printer cut paper test passed")
# Run the test for the web server
tests.server.testWebServer()
print("Internal web server test passed")
# Run the plugin loader tests
tests.plugins.testLoadPlugin()
print("Plugin loader test passed")
# Finally, run the actual Headlights script - this should be done with the testmode env-var
main.start()
print("Main script tests passed")
# If this all works, congrats - you've managed to not screw something up, which is a miracle!
print("All tests passed - headlights build tests successful")
except Exception as e:
print("Tests failed with exception \"" + str(e) + "\" - headlights build tests failed.")
raise
|
StefanRijnhart/bank-statement-import
|
account_bank_statement_import/__init__.py
|
Python
|
agpl-3.0
| 102
| 0
|
# -*- encoding: utf-8 -*-
from . import res_partner_bank
from . import account_bank_statement_import
|
ianzhengnan/learnpy
|
coroutine.py
|
Python
|
apache-2.0
| 413
| 0.004843
|
def consumer():
r = ''
while True:
n = yield r
if not n:
return
        print('[CONSUMER] Consuming %s...' % n)
        r = '200 OK'
def produce(c):
c.send(None)
n = 0
while n < 5:
n = n + 1
print('[PRODUCER] Producing %s...' % n)
r = c.send(n)
        print('[PRODUCER] Consumer return: %s' % r)
c.close()
c = consumer()
produce(c)
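# Expected output (deterministic for this script; shown for n = 1, the
# same three lines repeat for n = 2..5 before the generator is closed):
#   [PRODUCER] Producing 1...
#   [CONSUMER] Consuming 1...
#   [PRODUCER] Consumer return: 200 OK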
|
Freso/listenbrainz-server
|
listenbrainz_spark/stats/tests/test_utils.py
|
Python
|
gpl-2.0
| 2,000
| 0.0035
|
from datetime import datetime
import listenbrainz_spark.stats.utils as stats_utils
from listenbrainz_spark.path import LISTENBRAINZ_DATA_DIRECTORY
from listenbrainz_spark import utils
from listenbrainz_spark.tests import SparkTestCase
from listenbrainz_spark.stats import offset_months, offset_days
from pyspark.sql import Row
class UtilsTestCase(SparkTestCase):
# use path_ as prefix for all paths in this class.
path_ = LISTENBRAINZ_DATA_DIRECTORY
def tearDown(self):
path_found = utils.path_exists(self.path_)
if path_found:
utils.delete_dir(self.path_, recursive=True)
def test_get_latest_listen_ts(self):
date = datetime(2020, 5, 18)
        df = utils.create_dataframe(Row(listened_at=date), schema=None)
df = df.union(utils.create_dataframe(Row(listened_at=offset_days(date, 7)), schema=None))
utils.save_parquet(df, '{}/2020/5.parquet'.format(self.path_))
result = stats_utils.get_latest_listen_ts()
self.assertEqual(date, result)
def test_filter_listens(self):
from_date = datetime(2020, 5, 1)
to_date = datetime(2020, 5, 31)
df = utils.create_dataframe(Row(listened_at=offset_months(from_date, 1)), None)
df = df.union(utils.create_dataframe(Row(listened_at=offset_months(to_date, 1, shift_backwards=False)), None))
df = df.union(utils.create_dataframe(Row(listened_at=offset_days(from_date, 5, shift_backwards=False)), None))
df = df.union(utils.create_dataframe(Row(listened_at=offset_days(to_date, 5)), None))
result = stats_utils.filter_listens(df, from_date, to_date)
rows = result.collect()
self.assertEqual(rows[0]['listened_at'], offset_days(from_date, 5, shift_backwards=False))
self.assertEqual(rows[1]['listened_at'], offset_days(to_date, 5))
def test_get_last_monday(self):
date = datetime(2020, 5, 19)
self.assertEqual(datetime(2020, 5, 18), stats_utils.get_last_monday(date))
|
shanot/imp
|
modules/domino/test/test_bandb_sampler.py
|
Python
|
gpl-3.0
| 1,701
| 0.000588
|
from __future__ import print_function
import IMP
import IMP.test
import IMP.domino
import IMP.core
class TrivialParticleStates(IMP.domino.ParticleStates):
def __init__(self, n):
IMP.domino.ParticleStates.__init__(self)
self.key = IMP.IntKey("hi")
self.n = n
def get_number_of_particle_states(self):
return self.n
def load_particle_state(self, i, p):
if p.has_attribute(self.key):
p.set_value(self.key, i)
else:
p.add_attribute(self.key, i)
def do_show(self, stream):
pass
class Tests(IMP.test.TestCase):
def test_global_min1(self):
"""Testing branch and bound sampler"""
m = IMP.Model()
m.set_log_level(IMP.SILENT)
IMP.set_log_level(IMP.VERBOSE)
ps = []
ns = 5
np = 4
for i in range(0, np):
ps.append(IMP.Particle(m))
pst = IMP.domino.ParticleStatesTable()
print(m.get_number_of_score_states())
dsst = IMP.domino.BranchAndBoundSampler(m, pst)
        dsst.set_restraints([])
        for p in ps:
pst.set_particle_states(p, TrivialParticleStates(ns))
cs = dsst.create_sample()
self.assertEqual(cs.get_number_of_configurations(), ns ** len(ps))
all_states = []
for i in range(0, cs.get_number_of_configurations()):
cs.load_configuration(i)
s = []
for p in ps:
                s.append(p.get_value(IMP.IntKey("hi")))
            ss = IMP.domino.Assignment(s)
# print all_states
self.assertNotIn(s, all_states)
all_states.append(s)
if __name__ == '__main__':
IMP.test.main()
|
mholgatem/GPIOnext
|
cursesmenu/items/external_item.py
|
Python
|
mit
| 1,040
| 0.002885
|
import curses
from cursesmenu import clear_terminal
from cursesmenu.items import MenuItem
class ExternalItem(MenuItem):
"""
    A base class for items that need to do stuff on the console outside of curses mode.
Sets the terminal back to standard mode until the action is done.
    Should probably be subclassed.
"""
def __init__(self, text, menu=None, should_exit=False):
# Here so Sphinx doesn't copy extraneous info from the superclass's docstring
super(ExternalItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
def set_up(self):
"""
This class overrides this method
"""
self.menu.pause()
curses.def_prog_mode()
clear_terminal()
self.menu.clear_screen()
def clean_up(self):
"""
This class overrides this method
"""
self.menu.clear_screen()
curses.reset_prog_mode()
curses.curs_set(1) # reset doesn't do this right
curses.curs_set(0)
self.menu.resume()
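# A minimal subclassing sketch (illustrative only): `ExampleShellItem` and
# its `command` attribute are hypothetical, and this assumes the usual
# curses-menu convention that subclasses override an `action()` hook which
# the menu loop wraps between set_up() and clean_up().
class ExampleShellItem(ExternalItem):
    """Hypothetical item that runs a shell command outside curses mode."""
    def __init__(self, text, command, menu=None, should_exit=False):
        super(ExampleShellItem, self).__init__(text=text, menu=menu,
                                               should_exit=should_exit)
        self.command = command
    def action(self):
        # Runs with the terminal restored to standard mode
        import subprocess
        subprocess.call(self.command, shell=True)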
|
blag/django-cities
|
cities/south_migrations/0001_initial.py
|
Python
|
mit
| 19,791
| 0.007124
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Country'
db.create_table(u'cities_country', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('slug', self.gf('django.db.models.fields.CharField')(max_length=200)),
('code', self.gf('django.db.models.fields.CharField')(max_length=2, db_index=True)),
('code3', self.gf('django.db.models.fields.CharField')(max_length=3, db_index=True)),
('population', self.gf('django.db.models.fields.IntegerField')()),
('area', self.gf('django.db.models.fields.IntegerField')(null=True)),
('currency', self.gf('django.db.models.fields.CharField')(max_length=3, null=True)),
('currency_name', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
('languages', self.gf('django.db.models.fields.CharField')(max_length=250, null=True)),
('phone', self.gf('django.db.models.fields.CharField')(max_length=20)),
('continent', self.gf('django.db.models.fields.CharField')(max_length=2)),
('tld', self.gf('django.db.models.fields.CharField')(max_length=5)),
('capital', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'cities', ['Country'])
# Adding M2M table for field alt_names on 'Country'
m2m_table_name = db.shorten_name(u'cities_country_alt_names')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('country', models.ForeignKey(orm[u'cities.country'], null=False)),
('alternativename', models.ForeignKey(orm[u'cities.alternativename'], null=False))
))
db.create_unique(m2m_table_name, ['country_id', 'alternativename_id'])
# Adding M2M table for field neighbours on 'Country'
m2m_table_name = db.shorten_name(u'cities_country_neighbours')
db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_country', models.ForeignKey(orm[u'cities.country'], null=False)),
('to_country', models.ForeignKey(orm[u'cities.country'], null=False))
))
db.create_unique(m2m_table_name, ['from_country_id', 'to_country_id'])
# Adding model 'Region'
        db.create_table(u'cities_region', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('slug', self.gf('django.db.models.fields.CharField')(max_length=200)),
('name_std', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities.Country'])),
))
db.send_create_signal(u'cities', ['Region'])
# Adding M2M table for field alt_names on 'Region'
m2m_table_name = db.shorten_name(u'cities_region_alt_names')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('region', models.ForeignKey(orm[u'cities.region'], null=False)),
('alternativename', models.ForeignKey(orm[u'cities.alternativename'], null=False))
))
db.create_unique(m2m_table_name, ['region_id', 'alternativename_id'])
# Adding model 'Subregion'
db.create_table(u'cities_subregion', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('slug', self.gf('django.db.models.fields.CharField')(max_length=200)),
('name_std', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities.Region'])),
))
db.send_create_signal(u'cities', ['Subregion'])
# Adding M2M table for field alt_names on 'Subregion'
m2m_table_name = db.shorten_name(u'cities_subregion_alt_names')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('subregion', models.ForeignKey(orm[u'cities.subregion'], null=False)),
('alternativename', models.ForeignKey(orm[u'cities.alternativename'], null=False))
))
db.create_unique(m2m_table_name, ['subregion_id', 'alternativename_id'])
# Adding model 'City'
db.create_table(u'cities_city', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('slug', self.gf('django.db.models.fields.CharField')(max_length=200)),
('name_std', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('location', self.gf('django.contrib.gis.db.models.fields.PointField')()),
('population', self.gf('django.db.models.fields.IntegerField')()),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities.Region'], null=True, blank=True)),
('subregion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities.Subregion'], null=True, blank=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities.Country'])),
('elevation', self.gf('django.db.models.fields.IntegerField')(null=True)),
('kind', self.gf('django.db.models.fields.CharField')(max_length=10)),
('timezone', self.gf('django.db.models.fields.CharField')(max_length=40)),
))
db.send_create_signal(u'cities', ['City'])
# Adding M2M table for field alt_names on 'City'
m2m_table_name = db.shorten_name(u'cities_city_alt_names')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('city', models.ForeignKey(orm[u'cities.city'], null=False)),
('alternativename', models.ForeignKey(orm[u'cities.alternativename'], null=False))
))
db.create_unique(m2m_table_name, ['city_id', 'alternativename_id'])
# Adding model 'District'
db.create_table(u'cities_district', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('slug', self.gf('django.db.models.fields.CharField')(max_length=200)),
('name_std', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('location', self.gf('django.contrib.gis.db.models.fields.PointField')()),
('population', self.gf('django.db.models.fields.IntegerField')()),
('city', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities.City'])),
))
db.send_create_signal(u'cities', ['District'])
# Adding M2M table for field alt_names on 'District'
m2m_table_name = db.shorten_name(u'cities_district_alt_names')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('district', models.ForeignKey(orm[u'cities.district'], null=False)),
('alternativenam
|
smileboywtu/LTCodeSerialDecoder
|
netease/const.py
|
Python
|
apache-2.0
| 163
| 0.01227
|
# encoding: UTF-8
import os
class Constant:
    conf_dir = os.path.join(os.path.expanduser('~'), '.netease-musicbox')
    download_dir = conf_dir + "/cached"
|
odoomrp/odoomrp-wip
|
mrp_operations_rejected_quantity/models/operation_time_line.py
|
Python
|
agpl-3.0
| 1,038
| 0
|
# -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
class OperationTimeLine(models.Model):
_inherit = 'operation.time.line'
    @api.depends('accepted_amount', 'rejected_amount')
@api.multi
def _compute_total_amount(self):
for line in self:
line.total_amount = line.accepted_amount + line.rejected_amount
employee_id = fields.Many2one(
comodel_name='hr.employee', string='Employee', readonly=True)
accepted_amount = fields.Integer(
string='Accepted amount', default=0)
rejected_amount = fields.Integer(
string='Rejected amount', default=0)
total_amount = fields.Integer(
string='Total amount', default=0, compute='_compute_total_amount')
state = fields.Selection(
[('pending', 'Pending'),
('processed', 'Processed'),
('canceled', 'Canceled')
], string="State", default='pending', required=True)
|
AbletonAG/abl.util
|
test/test_bunch.py
|
Python
|
mit
| 1,113
| 0.002695
|
from unittest import TestCase
import pickle
from abl.util import (
Bunch,
)
class Derived(Bunch):
pass
class TestBunch(TestCase):
def test_as_dict(self):
bunch = Bunch(a='a', b='b')
assert bunch == dict(a='a', b='b')
def test_as_obj(self):
bunch = Bunch(a='a', b='b')
assert bunch.a == 'a'
assert bunch.b == 'b'
def test_failing_attribute(self):
bunch = Bunch(a='a', b='b')
self.assertRaises(AttributeError, getattr, bunch, 'c')
def test_failing_key(self):
bunch = Bunch(a='a', b='b')
self.assertRaises(KeyError, lambda:bunch['c'])
def test_pickling(self):
bunch = Bunch(a='a', b='b')
dump = pickle.dumps(bunch)
from_pickle = pickle.loads(dump)
assert bunch == from_pickle
        assert from_pickle.__class__ is Bunch
    def test_pickling_derived_class(self):
derived = Derived(a='a', b='b')
dump = pickle.dumps(derived)
from_pickle = pickle.loads(dump)
assert derived == from_pickle
assert from_pickle.__class__ is Derived
|
smartsheet-platform/smartsheet-python-sdk
|
smartsheet/models/access_token.py
|
Python
|
apache-2.0
| 2,612
| 0
|
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from ..types import *
from ..util import serialize
from ..util import deserialize
class AccessToken(object):
"""Smartsheet AccessToken data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the AccessToken model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self.allowed_values = {
'token_type': [
                'bearer']}
        self._access_token = String()
self._expires_at = Timestamp()
self._expires_in = Number()
self._refresh_token = String()
self._token_type = String(
accept=self.allowed_values['token_type']
)
if props:
            deserialize(self, props)
# requests package Response object
self.request_response = None
@property
def access_token(self):
return self._access_token.value
@access_token.setter
def access_token(self, value):
self._access_token.value = value
@property
def expires_at(self):
return self._expires_at.value
@expires_at.setter
def expires_at(self, value):
self._expires_at.value = value
@property
def expires_in(self):
return self._expires_in.value
@expires_in.setter
def expires_in(self, value):
self._expires_in.value = value
@property
def refresh_token(self):
return self._refresh_token.value
@refresh_token.setter
def refresh_token(self, value):
self._refresh_token.value = value
@property
def token_type(self):
return self._token_type.value
@token_type.setter
def token_type(self, value):
self._token_type.value = value
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()
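# A minimal usage sketch (illustrative only; the field values below are
# hypothetical):
#
#     token = AccessToken()
#     token.access_token = 'abc123'
#     token.token_type = 'bearer'   # the only accepted token_type value
#     token.expires_in = 3600
#     print(token.to_dict())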
|
ekivemark/my_device
|
bbp/bbp/settings.py
|
Python
|
apache-2.0
| 11,481
| 0.002178
|
"""
Django settings for bbp_oa project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
APPS_DIR = os.path.join(BASE_DIR, 'bbp/apps')
sys.path.insert(0, APPS_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Use this to review Settings at run time
DEBUG_SETTINGS = True
APPLICATION_TITLE="MedYear:Device"
if DEBUG_SETTINGS:
print "Application: %s" % APPLICATION_TITLE
print ""
print "BASE_DIR:%s " % BASE_DIR
print "APPS_DIR:%s " % APPS_DIR
ALLOWED_HOSTS = []
ADMINS = (
('Mark Scrimshire', 'mark@ekivemark.com'),
)
MANAGERS = ADMINS
# Application definition
INSTALLED_APPS = (
# add admin_bootstrapped items before django.contrib.admin
'django_admin_bootstrapped.bootstrap3',
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'bootstrap_themes',
# https://django-oauth2-provider.readthedocs.org/en/latest/getting_started.html
#'provider',
#'provider.oauth2',
# http://django-oauth-toolkit.readthedocs.org/en/latest/tutorial/tutorial_01.html
'oauth2_provider',
'corsheaders',
'rest_framework',
'device',
'bbp.member',
'bbp.member.vutils',
)
AUTHENTICATION_BACKENDS = (
'oauth2_provider.backends.OAuth2Backend',
# Uncomment following if you want to access the admin
'django.contrib.auth.backends.ModelBackend',
#'...',
)
# https://docs.djangoproject.com/en/1.7/topics/auth/customizing/#a-full-example
#AUTH_USER_MODEL = 'member.MyUser'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
)
# http://django-oauth-toolkit.readthedocs.org/en/latest/tutorial/tutorial_01.html
# Allow CORS requests from all domains (just for the scope of this tutorial):
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'bbp.urls'
WSGI_APPLICATION = 'bbp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DBPATH = os.path.join(BASE_DIR, 'db/db.db')
if DEBUG_SETTINGS:
print "DBPATH:",DBPATH
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': DBPATH, # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
# STATIC_ROOT = ''
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
if DEBUG_SETTINGS:
print "STATIC_ROOT:%s" % STATIC_ROOT
ADMIN_MEDIA_PREFIX = '/static/admin'
MAIN_STATIC_ROOT = os.path.join(BASE_DIR, 'mainstatic')
if DEBUG_SETTINGS:
print "MAIN_STATIC_ROOT:%s" % MAIN_STATIC_ROOT
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
MAIN_STATIC_ROOT,
# '/Users/mark/PycharmProjects/virtualenv/rb/rainbowbutton/static',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'bbp/templates'),
)
TEMPLATE_VISIBLE_SETTINGS = {
# Put Strings here that you want to be visible in the templates
# then add settings_context_processor
'DEBUG',
'TEMPLATE_DEBUG',
'APPLICATION_TITLE',
}
TEMPLATE_MODULES = {
# Put the names of custom modules in this section
# This will be used by home.index to display a list of modules
# that can be called
'privacy',
'about',
'contact',
'terms',
'faq',
'admin',
'member/view',
'member/get_id',
'accounts/logout',
'accounts/login',
}
TEMPLATE_CONTEXT_PROCESSORS = (
# Use a context processor to enable frequently used settings variables
# to be used in templates
'django.contrib.auth.context_processors.auth',
'bbp.settings_context_processor.settings',
)
# Default settings for bootstrap 3
BOOTSTRAP3 = {
# The URL to the jQuery JavaScript file
'jquery_url': '//code.jquery.com/jquery.min.js',
# The Bootstrap base URL
'base_url': '//netdna.bootstrapcdn.com/bootstrap/3.2.0/',
# The complete URL to the Bootstrap CSS file (None means derive it from base_url)
'css_url': None,
# The complete URL to the Bootstrap CSS file (None means no theme)
'theme_url': None,
# The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
'javascript_url': None,
# Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
'javascript_in_head': False,
# Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
'include_jquery': False,
# Label class to use in horizontal forms
'horizontal_label_class': 'col-md-2',
# Field class to use in horizontal forms
'horizontal_field_class': 'col-md-4',
# Set HTML required attribute on required fields
'set_required': True,
# Set placeholder attributes to label if
|
forkbong/qutebrowser
|
scripts/dev/update_version.py
|
Python
|
gpl-3.0
| 3,202
| 0.000312
|
#!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2018-2021 Andy Mender <andymenderunix@gmail.com>
# Copyright 2019-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Update version numbers using bump2version."""
import sys
import argparse
import os.path
import subprocess
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
os.pardir))
from scripts import utils
def bump_version(version_leap="patch"):
"""Update qutebrowser release version.
Args:
version_leap: define the jump between versions
("major", "minor", "patch")
"""
subprocess.run([sys.executable, '-m', 'bumpversion', version_leap],
check=True)
def show_commit():
subprocess.run(['git', 'show'], check=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Update release version.")
parser.add_argument('bump', action="store",
choices=["major", "minor", "patch"],
help="Update release version")
    parser.add_argument('--commands', action="store_true",
                        help="Only show commands to run post-release.")
args = parser.parse_args()
utils.change_cwd()
if not args.commands:
bump_version(args.bump)
show_commit()
import qutebrowser
version = qutebrowser.__version__
x_version = '.'.join([str(p) for p in qutebrowser.__version_info__[:-1]] +
['x'])
print("Run the following commands
|
to create a new release:")
print("* git push origin; git push origin v{v}".format(v=version))
if args.bump == 'patch':
print("* git checkout master && git cherry-pick v{v} && "
"git push origin".format(v=version))
else:
print("* git branch v{x} v{v} && git push --set-upstream origin v{x}"
.format(v=version, x=x_version))
print("* Create new release via GitHub (required to upload release "
"artifacts)")
print("* Linux: git fetch && git checkout v{v} && "
"tox -e build-release -- --upload"
.format(v=version))
print("* Windows: git fetch; git checkout v{v}; "
"py -3.7 -m tox -e build-release -- --asciidoc "
"$env:userprofile\\bin\\asciidoc-9.0.4\\asciidoc.py --upload"
.format(v=version))
print("* macOS: git fetch && git checkout v{v} && "
"tox -e build-release -- --upload"
.format(v=version))
|
sfu-fas/coursys
|
oldcode/alerts/forms.py
|
Python
|
gpl-3.0
| 1,401
| 0.012848
|
from django import forms
from django.forms.models import ModelForm
from .models import Alert, AlertType, AlertUpdate, AlertEmailTemplate
from django.template import Template, TemplateSyntaxError
class AlertTypeForm(ModelForm):
class Meta:
model = AlertType
exclude = ('hidden', 'config')
class EmailForm(ModelForm):
class Meta:
model = AlertEmailTemplate
exclude = ('alerttype', 'created_at', 'created_by', 'hidden', 'config')
def clean_content(self):
content = self.cleaned_data['content']
try:
Template(content)
        except TemplateSyntaxError as e:
raise forms.ValidationError('Syntax error in template: ' + str(e))
return content
class ResolutionForm(ModelForm):
class Meta:
model = AlertUpdate
exclude = ('alert', 'update_type', 'created_at', 'hidden')
class EmailResolutionForm(ModelForm):
from_email = forms.CharField( label="From", required=True )
to_email = forms.CharField( label="To", required=True )
subject = forms.CharField( label="Subject", required=True )
class Meta:
model = AlertUpdate
fields = ('to_email', 'from_email', 'subject', 'comments', 'resolved_until')
class AlertUpdateForm(ModelForm):
class Meta:
model = AlertUpdate
exclude = ('alert', 'update_type', 'created_at', 'hidden', 'resolved_until' )
|
rosscdh/django-crocodoc
|
setup.py
|
Python
|
gpl-2.0
| 911
| 0.021954
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "django-crocodoc",
packages=['dj_crocodoc'],
description = ("Django app for integrating with CrocoDoc"),
version = "0.1.5",
author = "Ross Crawford-d'Heureuse",
author_email = "ross@lawpal.com",
    license = "MIT",
keywords = "django crocdoc app",
url = "https://github.com/rosscdh/django-crocodoc",
install_requires = [
'crocodoc',
'django-braces',
'django-jsonfield',
'django-uuidfield',
'bunch',
'HTTPretty', # for testing
]
)
|
yorgenisparacare/tuconsejocomunal
|
tcc_consejocomunales/__openerp__.py
|
Python
|
gpl-3.0
| 943
| 0.002123
|
# -*- coding: utf-8 -*-
{
'name': "Gestión de los Consejos Comunales",
'summary': """
        Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "Your Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
    # Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base','usuarios_venezuela'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'vistas/consejocomunal_views.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
}
|
djangobrasil/djangobrasil.org
|
src/djangobrasil/success_cases/moderator.py
|
Python
|
gpl-3.0
| 104
| 0.028846
|
#from moderation import moderation
#from .models import SuccessCase
#moderation.register(SuccessCase)
|
broderboy/ai-stager
|
stager/staging/management/commands/cleanupslides.py
|
Python
|
mit
| 596
| 0.011745
|
from django.core.management.base import NoArgsCommand
from django.conf import settings
class Command(NoArgsCommand):
help = "Removes CompSlides from the database that do not have matching files on the drive."
def handle_noargs(self, **options):
from stager.staging.models import CompSlide
import os.path
slides = CompSlide.objects.all()
for slide in slides:
if not os.path.exists(settings.MEDIA_ROOT+"/"+str(slide.image)):
print str(slide.image), "deleted"
slide.delete()
|
sxjscience/tvm
|
python/tvm/relay/op/_transform.py
|
Python
|
apache-2.0
| 26,343
| 0.000835
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend compiler related feature registration"""
# pylint: disable=invalid-name,unused-argument, len-as-condition, too-many-nested-blocks, too-many-local-variables, too-many-arguments
from __future__ import absolute_import
import tvm
from tvm import te
from tvm.te.hybrid import script
from tvm.runtime import convert
from tvm import topi
from tvm.topi.util import get_const_int, get_const_tuple
from . import op as _reg
from . import strategy
from .op import OpPattern
from ._tensor import elemwise_shape_func
_reg.register_broadcast_schedule("broadcast_to")
_reg.register_broadcast_schedule("broadcast_to_like")
_reg.register_broadcast_schedule("expand_dims")
_reg.register_broadcast_schedule("repeat")
_reg.register_broadcast_schedule("tile")
_reg.register_broadcast_schedule("where")
_reg.register_injective_schedule("squeeze")
_reg.register_injective_schedule("reshape")
_reg.register_injective_schedule("reshape_like")
_reg.register_injective_schedule("full")
_reg.register_injective_schedule("full_like")
_reg.register_injective_schedule("arange")
_reg.register_injective_schedule("meshgrid")
_reg.register_injective_schedule("reverse")
_reg.register_injective_schedule("reverse_sequence")
_reg.register_injective_schedule("cast")
_reg.register_injective_schedule("cast_like")
_reg.register_injective_schedule("reinterpret")
_reg.register_injective_schedule("strided_slice")
_reg.register_injective_schedule("slice_like")
_reg.register_injective_schedule("split")
_reg.register_injective_schedule("take")
_reg.register_injective_schedule("transpose")
_reg.register_injective_schedule("stack")
_reg.register_injective_schedule("contrib_reverse_reshape")
_reg.register_injective_schedule("gather")
_reg.register_injective_schedule("gather_nd")
_reg.register_injective_schedule("sequence_mask")
_reg.register_injective_schedule("one_hot")
_reg.register_reduce_schedule("collapse_sum_like")
_reg.register_reduce_schedule("collapse_sum_to")
_reg.register_injective_schedule("unravel_index")
_reg.register_injective_schedule("sparse_to_dense")
_reg.register_injective_schedule("matrix_set_diag")
_reg.register_injective_schedule("adv_index")
# concatenate
_reg.register_schedule("concatenate", strategy.schedule_concatenate)
# strided_set
@_reg.register_compute("strided_set")
def compute_strided_set(attrs, inputs, output_type):
"""Compute definition of strided_set"""
return [topi.strided_set(inputs[0], inputs[1], inputs[2], inputs[3], inputs[4])]
_reg.register_injective_schedule("strided_set")
# layout_transform
_reg.register_injective_schedule("layout_transform")
_reg.register_pattern("layout_transform", OpPattern.INJECTIVE)
# argwhere
@_reg.register_compute("argwhere")
def compute_argwhere(attrs, inputs, output_type):
"""Compute definition of argwhere"""
output_shape = []
for s in output_type.shape:
if hasattr(s, "value"):
output_shape.append(s)
else:
# see Any, replace it with a var
output_shape.append(te.var("any_dim", "int32"))
new_output_type = tvm.relay.ty.TensorType(output_shape, "int32")
    return [topi.argwhere(new_output_type, inputs[0])]
_reg.register_schedule("argwhere", strategy.schedule_argwhere)
# scatter
@_reg.register_compute("scatter")
def compute_scatter(attrs, inputs, output_type):
"""Compute definition of sc
|
atter"""
return [topi.scatter(inputs[0], inputs[1], inputs[2], attrs.axis)]
_reg.register_schedule("scatter", strategy.schedule_scatter)
# scatter_add
@_reg.register_compute("scatter_add")
def compute_scatter_add(attrs, inputs, output_type):
"""Compute definition of scatter_add"""
return [topi.scatter_add(inputs[0], inputs[1], inputs[2], attrs.axis)]
_reg.register_schedule("scatter_add", strategy.schedule_scatter_add)
#####################
# Shape functions #
#####################
@script
def _arange_shape_func(start, stop, step):
out = output_tensor((1,), "int64")
if step[0] < 0:
out[0] = int64(ceil_div((int64(start[0]) - int64(stop[0])), int64(-step[0])))
else:
out[0] = int64(ceil_div((int64(stop[0]) - int64(start[0])), int64(step[0])))
return out
@_reg.register_shape_func("arange", True)
def arange_shape_func(attrs, inputs, _):
"""
Shape func for arange
"""
return [_arange_shape_func(*inputs)]
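# Worked example (hypothetical values): for arange(start=2, stop=10, step=3)
# the script above yields ceil_div(10 - 2, 3) = 3, matching [2, 5, 8];
# for a negative step such as arange(10, 2, -3) it yields
# ceil_div(10 - 2, 3) = 3, matching [10, 7, 4].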
@script
def _strided_slice_shape_func_input_shape(data_shape, begin, end, strides, slice_mode):
ndim = data_shape.shape[0]
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
cbegin = int64(0)
cend = int64(data_shape[i])
cstride = int64(1)
if len(strides) > i:
cstride = int64(strides[i])
if len(begin) > i:
cbegin = int64(begin[i])
if cbegin < 0:
cbegin += int64(data_shape[i])
if len(end) <= i:
cend = int64(data_shape[i])
elif slice_mode != 0:
cstride = int64(1)
if end[i] < 0:
cend = int64(data_shape[i])
else:
cend = cbegin + int64(end[i])
else:
if end[i] > data_shape[i]:
cend = int64(data_shape[i])
elif end[i] < -data_shape[i]:
cend = int64(-1)
else:
cend = int64(end[i])
if cend < 0:
cend += int64(data_shape[i])
assert cstride != 0, "Strides can't be zero."
if cstride < 0:
slice_range = cbegin - cend
step = -cstride
else:
slice_range = cend - cbegin
step = cstride
out[i] = int64(ceil_div(slice_range, step))
return out
@_reg.register_shape_func("strided_slice", False)
def strided_slice_shape_func(attrs, inputs, _):
"""
Shape func for strided_slice
"""
slice_mode = convert(0 if attrs.slice_mode == "end" else 1)
return [
_strided_slice_shape_func_input_shape(
inputs[0], attrs.begin, attrs.end, attrs.strides, slice_mode
)
]
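# Worked example (hypothetical values): with data_shape=[10], begin=[2],
# end=[8], strides=[2] and slice_mode="end", the script above computes
# cbegin=2, cend=8, cstride=2, so out[0] = ceil_div(8 - 2, 2) = 3
# (the selected indices being 2, 4 and 6).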
@script
def _concatenate_shape_func(inputs, axis):
ndim = inputs[0].shape[0]
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
if i != axis:
out[i] = inputs[0][i]
for j in const_range(1, len(inputs)):
assert out[i] == inputs[j][i], "Dims mismatch in the inputs of concatenate."
else:
out[i] = int64(0)
for j in const_range(len(inputs)):
out[i] += inputs[j][i]
return out
@_reg.register_shape_func("concatenate", False)
def concatenate_shape_func(attrs, inputs, _):
axis = get_const_int(attrs.axis)
if axis < 0:
axis += inputs[0].shape[0]
return [_concatenate_shape_func(inputs, convert(axis))]
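# Worked example (hypothetical shapes): concatenating inputs of shapes
# (2, 3) and (4, 3) along axis 0 gives out = (2 + 4, 3) = (6, 3); the
# assert in the script above enforces that every non-axis dim (here 3)
# agrees across inputs.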
@script
def _reshape_shape_func_input_shape(data_shape, newshape, ndim):
out = output_tensor((ndim,), "int64")
src_idx = 0
dst_idx = 0
infer_idx = -1
copy = False
skip = 0
for i in const_range(len(newshape)):
if skip > 0:
skip -= 1
elif newshape[i] > 0:
out[dst_idx] = int64(newshape[i])
src_idx += 1
dst_idx += 1
elif newshape[i] == 0:
out[dst_idx] = data_shape[src_idx]
src_idx += 1
dst_idx += 1
elif newshape[i] == -1:
assert infer_idx < 0
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/ete2/tools/phylobuild_lib/task/cog_selector.py
|
Python
|
mit
| 10,562
| 0.008426
|
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from StringIO import StringIO
import cPickle
from string import strip
from collections import defaultdict
import logging
import os
log = logging.getLogger("main")
from ete2.tools.phylobuild_lib.master_task import CogSelectorTask
from ete2.tools.phylobuild_lib.errors import DataError, TaskError
from ete2.tools.phylobuild_lib.utils import (GLOBALS, print_as_table, generate_node_ids,
encode_seqname, md5, pjoin, _min, _max, _mean, _median, _std)
from ete2.tools.phylobuild_lib import db
__all__ = ["CogSelector"]
class CogSelector(CogSelectorTask):
def __init__(self, target_sp, out_sp, seqtype, conf, confname):
self.missing_factor = float(conf[confname]["_species_missing_factor"])
self.max_missing_factor = float(conf[confname]["_max_species_missing_factor"])
self.cog_hard_limit = int(conf[confname]["_max_cogs"])
node_id, clade_id = generate_node_ids(target_sp, out_sp)
# Initialize task
CogSelectorTask.__init__(self, node_id, "cog_selector",
"MCL-COGs", None, conf[confname])
# taskid does not depend on jobs, so I set it manually
self.cladeid = clade_id
self.seqtype = seqtype
self.targets = target_sp
self.outgroups = out_sp
self.init()
self.size = len(target_sp | out_sp)
self.cog_analysis = None
self.cogs = None
def finish(self):
def sort_cogs_by_size(c1, c2):
'''
            sort cogs by descending size. If two cogs are the same size, sort
            them keeping first the one with the least represented
            species. Otherwise sort by sequence name sp_seqid.'''
r = -1 * cmp(len(c1), len(c2))
if r == 0:
# finds the cog including the less represented species
c1_repr = _min([sp2cogs[_sp] for _sp, _seq in c1])
c2_repr = _min([sp2cogs[_sp] for _sp, _seq in c2])
r = cmp(c1_repr, c2_repr)
if r == 0:
return cmp(sorted(c1), sorted(c2))
else:
return r
else:
return r
def sort_cogs_by_sp_repr(c1, c2):
c1_repr = _min([sp2cogs[_sp] for _sp, _seq in c1])
c2_repr = _min([sp2cogs[_sp] for _sp, _seq in c2])
r = cmp(c1_repr, c2_repr)
if r == 0:
r = -1 * cmp(len(c1), len(c2))
if r == 0:
return cmp(sorted(c1), sorted(c2))
else:
return r
else:
return r
all_species = self.targets | self.outgroups
# strict threshold
#min_species = len(all_species) - int(round(self.missing_factor * len(all_species)))
        # Relax threshold for cog selection to ensure the same genes are always included
min_species = len(all_species) - int(round(self.missing_factor * len(GLOBALS["target_species"])))
min_species = max(min_species, (1-self.max_missing_factor) * len(all_species))
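        # Worked example (hypothetical numbers): with 12 species in
        # all_species, 10 target species, missing_factor=0.2 and
        # max_missing_factor=0.4, this gives min_species =
        # max(12 - round(0.2 * 10), 0.6 * 12) = max(10, 7.2) = 10.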
smallest_cog, largest_cog = len(all_species), 0
all_singletons = []
sp2cogs = defaultdict(int)
for cognumber, cog in enumerate(open(GLOBALS["cogs_file"])):
sp2seqs = defaultdict(list)
for sp, seqid in [map(strip, seq.split(GLOBALS["spname_delimiter"], 1)) for seq in cog.split("\t")]:
sp2seqs[sp].append(seqid)
one2one_cog = set()
for sp, seqs in sp2seqs.iteritems():
#if len(seqs) != 1:
# print sp, len(seqs)
if sp in all_species and len(seqs) == 1:
sp2cogs[sp] += 1
one2one_cog.add((sp, seqs[0]))
smallest_cog = min(smallest_cog, len(one2one_cog))
largest_cog = max(largest_cog, len(one2one_cog))
all_singletons.append(one2one_cog)
#if len(one2one_cog) >= min_species:
# valid_cogs.append(one2one_cog)
        cognumber += 1  # sets the amount of COGs in the file
for sp, ncogs in sorted(sp2cogs.items(), key=lambda x: x[1], reverse=True):
log.log(28, "% 20s found in single copy in % 6d (%0.1f%%) COGs " %(sp, ncogs, 100 * ncogs/float(cognumber)))
valid_cogs = sorted([sing for sing in all_singletons if len(sing) >= min_species],
sort_cogs_by_size)
log.log(28, "Largest cog size: %s. Smallest cog size: %s" %(
largest_cog, smallest_cog))
self.cog_analysis = ""
# save original cog names hitting the hard limit
if len(valid_cogs) > self.cog_hard_limit:
log.warning("Applying hard limit number of COGs: %d out of %d available" %(self.cog_hard_limit, len(valid_cogs)))
self.raw_cogs = valid_cogs[:self.cog_hard_limit]
self.cogs = []
# Translate sequence names into the internal DB names
sp_repr = defaultdict(int)
sizes = []
for co in self.raw_cogs:
sizes.append(len(co))
for sp, seq in co:
sp_repr[sp] += 1
co_names = ["%s%s%s" %(sp, GLOBALS["spname_delimiter"], seq) for sp, seq in co]
encoded_names = db.translate_names(co_names)
if len(encoded_names) != len(co):
print set(co) - set(encoded_names.keys())
raise DataError("Some sequence ids could not be translated")
self.cogs.append(encoded_names.values())
        # ERROR! COGs selected are not the priority COGs sorted out before!!!
# Sort Cogs according to the md5 hash of its content. Random
# sorting but kept among runs
#map(lambda x: x.sort(), self.cogs)
#self.cogs.sort(lambda x,y: cmp(md5(','.join(x)), md5(','.join(y))))
log.log(28, "Analysis of current COG selection:")
for sp, ncogs in sorted(sp_repr.items(), key=lambda x:x[1], reverse=True):
log.log(28, " % 30s species present in % 6d COGs (%0.1f%%)" %(sp, ncogs, 100 * ncogs/float(len(self.cogs))))
log.log(28, " %d COGs selected with at least %d species out of %d" %(len(self.cogs), min_species, len(all_species)))
log.log(28, " Average COG size %0.1f/%0.1f +- %0.1f" %(_mean(sizes), _median(sizes), _std(sizes)))
# Some consistency checks
missing_sp = (all_species) - set(sp_repr.keys())
if missing_sp:
log.error("%d missing species or not present in single-copy in any cog:\n%s" %\
(len(missing_sp), '\n'.join(missing_sp)))
|
alexforencich/python-ivi
|
ivi/rigol/rigolDS4014.py
|
Python
|
mit
| 1,674
| 0.002987
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .rigolDS4000 import *
class rigolDS4014(rigolDS4000):
"Rigol DS4014 IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DS4014')
super(rigolDS4014, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
        self._digital_channel_count = 0
        self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 100e6
self._bandwidth_limit = {'20M': 20e6}
self._init_channels()
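# Hedged usage sketch (the resource string is illustrative, not from this
# file): python-ivi drivers are typically constructed with a VISA resource.
#
#   import ivi
#   scope = ivi.rigol.rigolDS4014("TCPIP0::192.168.1.100::INSTR")
#   print(scope._analog_channel_count)   # -> 4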
|
gurunars/dict-validator
|
dict_validator/fields/choice_field.py
|
Python
|
mit
| 1,274
| 0
|
from dict_validator import Field
class Choice(Field):
"""
    Accept any type of input as long as it matches one of the choices
mentioned in the provided list.
:param choices: list of choices to match against
    >>> from dict_validator import validate, describe
>>> class Schema:
... field = Choice(choices=["ONE", "TWO", 3, 4])
>>> list(validate(Schema, {"field": "ONE"}))
[]
>>> list(validate(Schema, {"field": 4}))
[]
>>> list(validate(Schema, {"field": "4"}))
[(['field'], 'Not among the choices')]
>>> list(validate(Schema, {"field": 1}))
[(['field'], 'Not among the choices')]
>>> list(validate(Schema, {"field": "FOUR"}))
[(['field'], 'Not among the choices')]
>>> from pprint import pprint
>>> pprint(list(describe(Schema)))
[([], {'type': 'Dict'}),
(['field'], {'choices': ['ONE', 'TWO', 3, 4], 'type': 'Choice'})]
"""
def __init__(self, choices, **kwargs):
super(Choice, self).__init__(**kwargs)
self._choices = choices
def _validate(self, value):
if value not in self._choices:
return "Not among the choices"
return None
def _describe(self):
return {
"choices": self._choices
}
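# A minimal sketch of a sibling field following the same _validate/_describe
# contract as Choice (a hypothetical example, not part of this library):
class Positive(Field):
    """Accept any number strictly greater than zero."""
    def _validate(self, value):
        if not isinstance(value, (int, float)) or value <= 0:
            return "Not a positive number"
        return None
    def _describe(self):
        # Assumption: an empty describe payload is acceptable for simple fields.
        return {}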
|
cubicdaiya/neoagent
|
build/config.py
|
Python
|
bsd-3-clause
| 660
| 0.00303
|
# -*- coding: utf-8 -*-
import sys
cflags = [
'-std=c99',
'-Wall',
'-g',
'-O2',
# '-fno-strict-aliasing',
    '-D_GNU_SOURCE',
'-Wimplicit-function-declaration',
    '-Wunused-variable',
]
libs = [
'pthread',
'ev',
'json',
]
if sys.platform != 'darwin':
libs.append('rt')
includes = [
#'ext',
]
headers = [
'stdint.h',
'stdbool.h',
'unistd.h',
'sys/stat.h',
'sys/types.h',
'sys/socket.h',
'sys/un.h',
'sys/ioctl.h',
'arpa/inet.h',
'netinet/in.h',
'netdb.h',
'signal.h',
'errno.h',
'pthread.h',
'ev.h',
]
funcs = [
'sigaction',
'sigignore',
]
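# Hedged illustration of how these lists might be turned into compiler
# arguments (the real consumer is the project's build script; this is only a
# sketch):
if __name__ == '__main__':
    args = list(cflags)
    args += ['-I%s' % path for path in includes]
    args += ['-l%s' % lib for lib in libs]
    print(' '.join(['gcc'] + args))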
|
mhvk/astropy
|
astropy/cosmology/io/row.py
|
Python
|
bsd-3-clause
| 5,402
| 0.002777
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import numpy as np
from astropy.table import Row
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from .mapping import from_mapping
def from_row(row, *, move_to_meta=False, cosmology=None):
"""Instantiate a `~astropy.cosmology.Cosmology` from a `~astropy.table.Row`.
Parameters
----------
row : `~astropy.table.Row`
The object containing the Cosmology information.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'table'.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a Row with
``from_row``, we will first make a `~astropy.table.Row` using
:func:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu [3] Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64 float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Now this row can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(cr, format="astropy.row")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.7 km / (Mpc s), Om0=0.31,
Tcmb0=2.725 K, Neff=3.05, m_nu=[0. 0. 0.06] eV, Ob0=0.049)
"""
# special values
name = row['name'] if 'name' in row.columns else None # get name from column
meta = copy.deepcopy(row.meta)
# turn row into mapping, filling cosmo if not in a column
mapping = dict(row)
mapping["name"] = name
mapping.setdefault("cosmology", meta.pop("cosmology", None))
mapping["meta"
|
] = meta
# build cosmology from map
return from_mapping(mapping, move_to_meta=move_to_meta, cosmology=cosmology)
def to_row(cosmology, *args, cosmology_in_meta=False):
"""Serialize the cosmology into a `~astropy.table.Row`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
    cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`) or
as the first column (if `False`, default).
Returns
-------
`~astropy.table.Row`
With columns for the cosmology parameters, and metadata in the Table's
``meta`` attribute. The cosmology class name will either be a column
or in ``meta``, depending on 'cosmology_in_meta'.
Examples
--------
A Cosmology as a `~astropy.table.Row` will have the cosmology's name and
parameters as columns.
>>> from astropy.cosmology import Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu [3] Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64 float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
The cosmological class and other metadata, e.g. a paper reference, are in
the Table's metadata.
"""
from .table import to_table
table = to_table(cosmology, cosmology_in_meta=cosmology_in_meta)
return table[0] # extract row from table
def row_identify(origin, format, *args, **kwargs):
"""Identify if object uses the `~astropy.table.Row` format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Row) and (format in (None, "astropy.row"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.row", Cosmology, from_row)
convert_registry.register_writer("astropy.row", Cosmology, to_row)
convert_registry.register_identifier("astropy.row", Cosmology, row_identify)
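# With the registrations above, round-trips go through the unified I/O layer
# (this mirrors the doctests earlier in this module):
#
#   >>> from astropy.cosmology import Cosmology, Planck18
#   >>> row = Planck18.to_format("astropy.row")
#   >>> Cosmology.from_format(row, format="astropy.row") == Planck18
#   True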
|
benzkji/django-cms
|
cms/wizards/views.py
|
Python
|
bsd-3-clause
| 5,882
| 0
|
# -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.files.storage import FileSystemStorage
from django.forms import Form
from django.template.response import SimpleTemplateResponse
from django.urls import NoReverseMatch
from formtools.wizard.views import SessionWizardView
from cms.models import Page
from cms.utils import get_current_site
from cms.utils.i18n import get_site_language_from_request
from .wizard_pool import wizard_pool
from .forms import (
WizardStep1Form,
WizardStep2BaseForm,
step2_form_factory,
)
class WizardCreateView(SessionWizardView):
template_name = 'cms/wizards/start.html'
file_storage = FileSystemStorage(
location=os.path.join(settings.MEDIA_ROOT, 'wizard_tmp_files'))
form_list = [
('0', WizardStep1Form),
# Form is used as a placeholder form.
# the real form will be loaded after step 0
('1', Form),
]
def dispatch(self, *args, **kwargs):
user = self.request.user
if not user.is_active or not user.is_staff:
raise PermissionDenied
self.site = get_current_site()
return super(WizardCreateView, self).dispatch(*args, **kwargs)
def get_current_step(self):
"""Returns the current step, if possible, else None."""
try:
return self.steps.current
except AttributeError:
return None
def is_first_step(self, step=None):
step = step or self.get_current_step()
return step == '0'
def is_second_step(self, step=None):
step = step or self.get_current_step()
return step == '1'
def get_context_data(self, **kwargs):
context = super(WizardCreateView, self).get_context_data(**kwargs)
if self.is_second_step():
context['wizard_entry'] = self.get_selected_entry()
return context
def get_form(self, step=None, data=None, files=None):
if step is None:
step = self.steps.current
# We need to grab the page from pre-validated data so that the wizard
# has it to prepare the list of valid entries.
if data:
page_key = "{0}-page".format(step)
self.page_pk = data.get(page_key, None)
else:
self.page_pk = None
if self.is_second_step(step):
self.form_list[step] = self.get_step_2_form(step, data, files)
return super(WizardCreateView, self).get_form(step, data, files)
def get_form_kwargs(self, step=None):
"""This is called by self.get_form()"""
kwargs = super(WizardCreateView, self).get_form_kwargs()
kwargs['wizard_user'] = self.request.user
if self.is_second_step(step):
kwargs['wizard_page'] = self.get_origin_page()
kwargs['wizard_language'] = self.get_origin_language()
else:
page_pk = self.page_pk or self.request.GET.get('page', None)
if page_pk and page_pk != 'None':
kwargs['wizard_page'] = Page.objects.filter(pk=page_pk).first()
else:
kwargs['wizard_page'] = None
kwargs['wizard_language'] = get_site_language_from_request(
self.request,
site_id=self.site.pk,
)
return kwargs
def get_form_initial(self, step):
"""This is called by self.get_form()"""
initial = super(WizardCreateView, self).get_form_initial(step)
if self.is_first_step(step):
initial['page'] = self.request.GET.get('page')
initial['language'] = self.request.GET.get('language')
return initial
def get_step_2_form(self, step=None, data=None, files=None):
entry_form_class = self.get_selected_entry().form
step_2_base_form = self.get_step_2_base_form()
form = step2_form_factory(
mixin_cls=step_2_base_form,
entry_form_class=entry_form_class,
)
return form
def get_step_2_base_form(self):
"""
Returns the base form to be used for step 2.
        This form is subclassed dynamically by the form defined per module.
"""
return WizardStep2BaseForm
def get_template_names(self):
if self.is_first_step():
template_name = self.template_name
else:
template_name = self.get_selected_entry().template_name
return template_name
def done(self, form_list, **kwargs):
"""
This step only runs if all forms are valid. Simply emits a simple
template that uses JS to redirect to the newly created object.
"""
form_one, form_two = list(form_list)
instance = form_two.save()
url = self.get_success_url(instance)
language = form_one.cleaned_data['language']
if not url:
page = self.get_origin_page()
if page:
try:
url = page.get_absolute_url(language)
except NoReverseMatch:
url = '/'
else:
url = '/'
return SimpleTemplateResponse("cms/wizards/done.html", {"url": url})
def get_selected_entry(self):
data = self.get_cleaned_data_for_step('0')
return wizard_pool.get_entry(data['entry'])
def get_origin_page(self):
data = self.get_cleaned_data_for_step('0')
return data.get('page')
def get_origin_language(self):
        data = self.get_cleaned_data_for_step('0')
return data.get('language')
def get_success_url(self, instance):
entry = self.get_selected_entry()
language = self.get_origin_language()
success_url = entry.get_success_url(
obj=instance,
language=language,
)
        return success_url
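# Hedged sketch: get_step_2_base_form() is the intended subclassing hook for
# swapping in a project-specific step-2 base form (MyStep2BaseForm is
# hypothetical):
#
#   class MyWizardCreateView(WizardCreateView):
#       def get_step_2_base_form(self):
#           return MyStep2BaseForm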
|
cwebster2/pyMeteo
|
pymeteo/cm1/hodographs/straight.py
|
Python
|
bsd-3-clause
| 1,428
| 0.014706
|
import numpy as np
from pymeteo import constants
from .. import OptionsWidget
class straight(OptionsWidget.OptionsWidget):
def __init__(self):
super(straight,self).__init__()
name = 'straight (linear increase)'
variables = [ ('z_constabv', '6000', 'm'),
('z_constblo', '0', 'm'),
('u_max', '30.0', 'm/s'),
('u_scaling', '1.0', ''),
('v_max', '7.0', 'm/s'),
('u_adjust' , '0.0', 'm/s'),
('v_adjust' , '0.0', 'm/s') ]
self.initUI(name, variables)
def plot(self):
        # need z, u and v arrays
z = np.arange(0., 22000., 50.)
u = np.zeros(len(z))
v = np.zeros(len(z))
# parameters
zdep1 = self.getOption('z_constabv')
zdep0 = self.getOption('z_constblo')
        umax = self.getOption('u_max')
sf = self.getOption('u_scaling')
vmax = self.getOption('v_max')
cx = self.getOption('u_adjust')
cy = self.getOption('v_adjust')
for k in range(len(z)):
if (z[k] < zdep0): # constant below this height
u[k] = 0
elif (z[k] < zdep1): # shear
u[k] = ((z[k]-zdep0)/(zdep1-zdep0))*umax
                u[k] = u[k] * (1 + (sf-1)*((z[k]-zdep0)/(zdep1-zdep0)))
else: # constant section
u[k] = umax*sf
v[:] = vmax
u[:] = u[:] - cx
v[:] = v[:] - cy
#emit
return (z,u,v)
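# Hedged sketch: a vectorized equivalent of the shear loop in plot() above
# (self-contained; parameter values are illustrative, matching the defaults).
if __name__ == '__main__':
    z = np.arange(0., 22000., 50.)
    zdep0, zdep1, umax, sf = 0.0, 6000.0, 30.0, 1.0
    frac = np.clip((z - zdep0) / (zdep1 - zdep0), 0.0, 1.0)
    u = frac * umax * (1.0 + (sf - 1.0) * frac)  # 0 below zdep0, umax*sf above zdep1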
|
TheBB/badger
|
grevling/__init__.py
|
Python
|
agpl-3.0
| 38,535
| 0.001972
|
from collections.abc import Sequence
from contextlib import contextmanager
from datetime import datetime
from difflib import Differ
import inspect
from itertools import product
import json
import multiprocessing
import operator
import os
from pathlib import Path
import pydoc
import re
import shlex
import shutil
import subprocess
from tempfile import TemporaryDirectory
from time import time as osclock
from typing import Dict, List, Any, Iterable, Optional, Set
from bidict import bidict
from fasteners import InterProcessLock
import numpy as np
import pandas as pd
from simpleeval import SimpleEval, DEFAULT_FUNCTIONS, NameNotDefined
from typing_inspect import get_origin, get_args
from .plotting import Backends
from .render import render
from .schema import load_and_validate
from . import util
__version__ = '0.1.0'
@contextmanager
def time():
    start = osclock()
    # The elapsed-time lambda is evaluated lazily: callers invoke it after the
    # with-block exits, by which point `end` has been assigned below.
    yield lambda: end - start
    end = osclock()
def _pandas_dtype(tp):
if tp == int:
return pd.Int64Dtype()
if util.is_list_type(tp):
return object
return tp
def _typename(tp) -> str:
try:
return {int: 'integer', str: 'string', float: 'float', 'datetime64[ns]': 'datetime'}[tp]
except KeyError:
base = {list: 'list'}[get_origin(tp)]
subs = ', '.join(_typename(k) for k in get_args(tp))
return f'{base}[{subs}]'
def _guess_eltype(collection):
if all(isinstance(v, str) for v in collection):
return str
if all(isinstance(v, int) for v in collection):
return int
assert all(isinstance(v, (int, float)) for v in collection)
return float
def call_yaml(func, mapping, *args, **kwargs):
signature = inspect.signature(func)
mapping = {key.replace('-', '_'): value for key, value in mapping.items()}
binding = signature.bind(*args, **kwargs, **mapping)
return func(*binding.args, **binding.kwargs)
class Parameter(Sequence):
@classmethod
def load(cls, name, spec):
if isinstance(spec, list):
return cls(name, spec)
subcls = util.find_subclass(cls, spec['type'], root=False, attr='__tag__')
del spec['type']
return call_yaml(subcls, spec, name)
def __init__(self, name, values):
self.name = name
self.values = values
def __len__(self):
return len(self.values)
def __getitem__(self, index):
return self.values[index]
class UniformParameter(Parameter):
__tag__ = 'uniform'
def __init__(self, name, interval, num):
super().__init__(name, np.linspace(*interval, num=num))
class GradedParameter(Parameter):
__tag__ = 'graded'
def __init__(self, name, interval, num, grading):
lo, hi = interval
step = (hi - lo) * (1 - grading) / (1 - grading ** (num - 1))
values = [lo]
for _ in range(num - 1):
values.append(values[-1] + step)
step *= grading
super().__init__(name, np.array(values))
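# Worked example of the grading formula above: with interval [0, 1], num=4 and
# grading=2.0, the initial step is (1-0)*(1-2)/(1-2**3) = 1/7 and doubles each
# iteration, so GradedParameter('h', [0.0, 1.0], 4, 2.0) holds the values
# [0, 1/7, 3/7, 1].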
class ParameterSpace(dict):
def subspace(self, *names: str) -> Iterable[Dict]:
params = [self[name] for name in names]
indexes = [range(len(p)) for p in params]
for values in util.dict_product(names, params):
yield values
def fullspace(self) -> Iterable[Dict]:
yield from self.subspace(*self.keys())
def size(self, *names: str) -> int:
return util.prod(len(self[name]) for name in names)
def size_fullspace(self) -> int:
return self.size(*self.keys())
class ContextManager:
parameters: ParameterSpace
evaluables: Dict[str, str]
    constants: Dict[str, Any]
types: Dict[str, Any]
def __init__(self, data: Dict):
self.parameters = ParameterSpace()
for name, paramspec in data.get('parameters', {}).items():
param = Parameter.load(name, paramspec)
self.parameters[param.name] = param
self.evaluables = dict(data.get('evaluate', {}))
        self.constants = dict(data.get('constants', {}))
self.types = {
'_index': int,
'_logdir': str,
'_started': 'datetime64[ns]',
'_finished': 'datetime64[ns]',
}
self.types.update(data.get('types', {}))
# Guess types of parameters
for name, param in self.parameters.items():
if name not in self.types:
self.types[name] = _guess_eltype(param)
# Guess types of evaluables
if any(name not in self.types for name in self.evaluables):
contexts = list(self.parameters.fullspace())
for ctx in contexts:
self.evaluate_context(ctx, verbose=False)
for name in self.evaluables:
if name not in self.types:
values = [ctx[name] for ctx in contexts]
self.types[name] = _guess_eltype(values)
def evaluate_context(self, context, verbose=True, allowed_missing=(), add_constants=True):
evaluator = SimpleEval(functions={**DEFAULT_FUNCTIONS,
'log': np.log,
'log2': np.log2,
'log10': np.log10,
'sqrt': np.sqrt,
'abs': np.abs,
'ord': ord,
'sin': np.sin,
'cos': np.cos,
})
evaluator.names.update(context)
evaluator.names.update({
k: v for k, v in self.constants.items() if k not in context
})
if allowed_missing is not True:
allowed_missing = set(allowed_missing)
for name, code in self.evaluables.items():
try:
result = evaluator.eval(code) if isinstance(code, str) else code
except NameNotDefined as error:
if allowed_missing is True:
util.log.debug(f'Skipped evaluating: {name}')
continue
elif error.name in allowed_missing:
allowed_missing.add(name)
util.log.debug(f'Skipped evaluating: {name}')
continue
else:
raise
if verbose:
util.log.debug(f'Evaluated: {name} = {repr(result)}')
evaluator.names[name] = context[name] = result
if add_constants:
for k, v in self.constants.items():
if k not in context:
context[k] = v
return context
def subspace(self, *names: str, **kwargs) -> Iterable[Dict]:
for values in self.parameters.subspace(*names):
yield self.evaluate_context(values, **kwargs)
def fullspace(self, **kwargs) -> Iterable[Dict]:
yield from self.subspace(*self.parameters, **kwargs)
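# Hedged usage sketch for ContextManager (keys mirror what __init__ reads;
# exact iteration order depends on util.dict_product):
#
#   cm = ContextManager({'parameters': {'n': [1, 2, 3]},
#                        'evaluate': {'m': 'n * 2'}})
#   for ctx in cm.fullspace():
#       print(ctx['n'], ctx['m'])   # -> (1, 2), (2, 4), (3, 6)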
class FileMapping:
source: str
target: str
template: bool
mode: str
@classmethod
def load(cls, spec: dict, **kwargs):
if isinstance(spec, str):
return cls(spec, spec, **kwargs)
return call_yaml(cls, spec, **kwargs)
def __init__(self, source, target=None, template=False, mode='simple'):
if target is None:
target = source if mode == 'simple' else '.'
if template:
mode = 'simple'
self.source = source
self.target = target
self.template = template
self.mode = mode
def iter_paths(self, context, sourcepath, targetpath):
if self.mode == 'simple':
yield (
sourcepath / render(self.source, context),
targetpath / render(self.target, context),
)
elif self.mode == 'glob':
target = targetpath / render(self.target, context)
for path in sourcepath.glob(render(self.source, context)):
path = path.relative_to(sourcepath)
yield (sourcepath / path, target / path)
def copy(self, context, sourcepath, targetpath, sourcename='SRC', targetname='TGT', ignore_missing=False):
for source, target in self.iter_paths(context, sourcepath, targetpath):
logsrc = Path(sourcename) / source.relative_to(sourcepath)
logtgt = Path(targetname) / target.relative_to(targetpath)
i
|
consbio/gis-metadata-parser
|
gis_metadata/iso_metadata_parser.py
|
Python
|
bsd-3-clause
| 32,703
| 0.003792
|
""" A module to contain utility ISO-19115 metadata parsing helpers """
from collections import OrderedDict
from copy import deepcopy
from frozendict import frozendict as FrozenOrderedDict
from parserutils.collections import filter_empty, reduce_value, wrap_value
from parserutils.elements import get_element_name, get_element_text, get_elements_text
from parserutils.elements import get_elements, get_remote_element, insert_element, remove_element
from parserutils.elements import XPATH_DELIM
from gis_metadata.exceptions import InvalidContent
from gis_metadata.metadata_parser import MetadataParser
from gis_metadata.utils import DATE_TYPE, DATE_TYPE_SINGLE, DATE_TYPE_MULTIPLE
from gis_metadata.utils import DATE_TYPE_RANGE, DATE_TYPE_RANGE_BEGIN, DATE_TYPE_RANGE_END
from gis_metadata.utils import ATTRIBUTES
from gis_metadata.utils import CONTACTS
from gis_metadata.utils import BOUNDING_BOX
from gis_metadata.utils import DATES
from gis_metadata.utils import DIGITAL_FORMS
from gis_metadata.utils import KEYWORDS_PLACE, KEYWORDS_STRATUM, KEYWORDS_TEMPORAL, KEYWORDS_THEME
from gis_metadata.utils import LARGER_WORKS
from gis_metadata.utils import PROCESS_STEPS
from gis_metadata.utils import RASTER_DIMS, RASTER_INFO
from gis_metadata.utils import COMPLEX_DEFINITIONS, ParserProperty
from gis_metadata.utils import format_xpaths, get_default_for_complex, get_default_for_complex_sub
from gis_metadata.utils import parse_complex_list, parse_property, update_complex_list, update_property
ISO_ROOTS = ('MD_Metadata', 'MI_Metadata')
KEYWORD_PROPS = (KEYWORDS_PLACE, KEYWORDS_STRATUM, KEYWORDS_TEMPORAL, KEYWORDS_THEME)
KEYWORD_TYPES = FrozenOrderedDict({
KEYWORDS_PLACE: 'place',
KEYWORDS_STRATUM: 'stratum',
KEYWORDS_TEMPORAL: 'temporal',
KEYWORDS_THEME: 'theme'
})
# For appending digital form content to ISO distribution format specs
ISO_DIGITAL_FORMS_DELIM = '@------------------------------@'
# Define backup locations for attribute sub-properties and dimension type property
ISO_DEFINITIONS = dict({k: dict(v) for k, v in dict(COMPLEX_DEFINITIONS).items()})
ISO_DEFINITIONS[ATTRIBUTES].update({
'_definition_source': '{_definition_src}',
'__definition_source': '{__definition_src}',
'___definition_source': '{___definition_src}'
})
ISO_DEFINITIONS[RASTER_DIMS]['_type'] = '{_type}'
ISO_DEFINITIONS = FrozenOrderedDict({k: FrozenOrderedDict(v) for k, v in ISO_DEFINITIONS.items()})
ISO_TAG_ROOTS = OrderedDict((
# First process private dependency tags (order enforced by key sorting)
('_content_coverage', 'contentInfo/MD_CoverageDescription'),
('_dataqual', 'dataQualityInfo/DQ_DataQuality'),
('_dataqual_lineage', '{_dataqual}/lineage/LI_Lineage'),
('_dataqual_report', '{_dataqual}/report'),
('_distinfo', 'distributionInfo/MD_Distribution'),
('_distinfo_dist', '{_distinfo}/distributor/MD_Distributor'),
('_distinfo_proc', '{_distinfo_dist}/distributionOrderProcess/MD_StandardOrderProcess'),
('_distinfo_resp', '{_distinfo_dist}/distributorContact/CI_ResponsibleParty'),
('_distinfo_resp_contact', '{_distinfo_resp}/contactInfo/CI_Contact'),
('_distinfo_rsrc', '{_distinfo}/transferOptions/MD_DigitalTransferOptions/onLine/CI_OnlineResource'),
('_idinfo', 'identificationInfo/MD_DataIdentification'),
('_idinfo_aggregate', '{_idinfo}/aggregationInfo/MD_AggregateInformation'),
('_idinfo_aggregate_citation', '{_idinfo_aggregate}/aggregateDataSetName/CI_Citation'),
('_idinfo_aggregate_contact', '{_idinfo_aggregate_citation}/citedResponsibleParty/CI_ResponsibleParty'),
('_idinfo_citation', '{_idinfo}/citation/CI_Citation'),
('_idinfo_citresp', '{_idinfo_citation}/citedResponsibleParty/CI_ResponsibleParty'),
('_idinfo_extent', '{_idinfo}/extent/EX_Extent'),
('_idinfo_keywords', '{_idinfo}/descriptiveKeywords/MD_Keywords'),
('_idinfo_resp', '{_idinfo}/pointOfContact/CI_ResponsibleParty'),
('_idinfo_resp_contact', '{_idinfo_resp}/contactInfo/CI_Contact'),
('_srinfo_grid_rep', 'spatialRepresentationInfo/MD_GridSpatialRepresentation'),
('_srinfo_grid_dim', '{_srinfo_grid_rep}/axisDimensionProperties/MD_Dimension'),
# Supported in separate file ISO-19110: FC_FeatureCatalog
('_attr_root', 'FC_FeatureCatalogue'),
    ('_attr_base', 'featureType/FC_FeatureType/carrierOfCharacteristics/FC_FeatureAttribute'),
('_attr_def', '{_attr_base}/definitionReference/FC_DefinitionReference/definitionSource/FC_DefinitionSource'),
('_attr_src', '{_attr_def}/source/CI_Citation/citedResponsibleParty/CI_ResponsibleParty'),
# References to separate file ISO-19110 from: MD_Metadata
('_attr_citation', 'contentInfo/MD_FeatureCatalogueDescription/featureCatalogueCitation'),
('_attr_contact', '{_attr_citation}/CI_Citation/citedResponsibleParty/CI_ResponsibleParty/contactInfo/CI_Contact'),
('_attr_contact_url', '{_attr_contact}/onlineResource/CI_OnlineResource/linkage/URL')
))
# Two passes required because of self references within roots dict
ISO_TAG_ROOTS.update(format_xpaths(ISO_TAG_ROOTS, **ISO_TAG_ROOTS))
ISO_TAG_ROOTS.update(format_xpaths(ISO_TAG_ROOTS, **ISO_TAG_ROOTS))
ISO_TAG_ROOTS = FrozenOrderedDict(ISO_TAG_ROOTS)
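# Why two passes: templates may nest one level, e.g. '_c' -> '{_b}' -> '{_a}'.
# A minimal illustration with plain str.format (not the format_xpaths helper):
#
#   roots = {'_a': 'x/y', '_b': '{_a}/z', '_c': '{_b}/w'}
#   roots.update({k: v.format(**roots) for k, v in roots.items()})  # _c == '{_a}/z/w'
#   roots.update({k: v.format(**roots) for k, v in roots.items()})  # _c == 'x/y/z/w'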
ISO_TAG_FORMATS = {
# Property-specific xpath roots: the base from which each element repeats
'_attribute_accuracy_root': '{_dataqual_report}',
'_attributes_root': 'featureType/FC_FeatureType/carrierOfCharacteristics',
'_bounding_box_root': '{_idinfo_extent}/geographicElement',
'_contacts_root': '{_idinfo}/pointOfContact',
'_dataset_completeness_root': '{_dataqual_report}',
'_dates_root': '{_idinfo_extent}/temporalElement',
'_digital_forms_root': '{_distinfo}/distributionFormat',
'_dist_liability_root': '{_idinfo}/resourceConstraints',
'_transfer_options_root': '{_distinfo}/transferOptions/MD_DigitalTransferOptions/onLine',
'_keywords_root': '{_idinfo}/descriptiveKeywords',
'_larger_works_root': '{_idinfo_aggregate_citation}',
'_process_steps_root': '{_dataqual_lineage}/processStep',
'_raster_info_root': '{_srinfo_grid_rep}/axisDimensionProperties',
'_use_constraints_root': '{_idinfo}/resourceConstraints',
# Then process public dependent tags
'title': '{_idinfo_citation}/title/CharacterString',
'abstract': '{_idinfo}/abstract/CharacterString',
'purpose': '{_idinfo}/purpose/CharacterString',
'supplementary_info': '{_idinfo}/supplementalInformation/CharacterString',
'online_linkages': '{_idinfo_citresp}/contactInfo/CI_Contact/onlineResource/CI_OnlineResource/linkage/URL',
'originators': '{_idinfo_citresp}/organisationName/CharacterString',
'publish_date': '{_idinfo_citation}/date/CI_Date/date/Date',
'publish_date_type': '{_idinfo_citation}/date/CI_Date/dateType/CI_DateTypeCode',
'data_credits': '{_idinfo}/credit/CharacterString',
CONTACTS: '{_idinfo_resp}/{{ct_path}}',
'dist_contact_org': '{_distinfo_resp}/organisationName/CharacterString',
'dist_contact_person': '{_distinfo_resp}/individualName/CharacterString',
'dist_address_type': '{_distinfo_resp_contact}/address/@type',
'dist_address': '{_distinfo_resp_contact}/address/CI_Address/deliveryPoint/CharacterString',
'dist_city': '{_distinfo_resp_contact}/address/CI_Address/city/CharacterString',
'dist_state': '{_distinfo_resp_contact}/address/CI_Address/administrativeArea/CharacterString',
'dist_postal': '{_distinfo_resp_contact}/address/CI_Address/postalCode/CharacterString',
'dist_country': '{_distinfo_resp_contact}/address/CI_Address/country/CharacterString',
'_dist_country': '{_distinfo_resp_contact}/address/CI_Address/country/Country', # If not in CharacterString
'dist_phone': '{_distinfo_resp_contact}/phone/CI_Telephone/voice/CharacterString',
'dist_email': '{_distinfo_resp_contact}/address/CI_Address/electronicMailAddress/CharacterString',
'dist_liability': '{_idinfo}/resourceConstraints/MD_LegalConstraints/otherConstraints/CharacterString',
'processing_fees': '{_distinfo_proc}/fees/CharacterString',
'processing_instrs': '{_distinfo_proc}/orderingInstructions/CharacterString',
'resource_desc': '{_idinfo}/re
|
ihmeuw/vivarium
|
src/vivarium/framework/resource.py
|
Python
|
bsd-3-clause
| 12,125
| 0.000495
|
"""
===================
Resource Management
===================
This module provides a tool to manage dependencies on resources within a
:mod:`vivarium` simulation. These resources take the form of things that can
be created and utilized by components, for example columns in the
:mod:`state table <vivarium.framework.population>`
or :mod:`named value pipelines <vivarium.framework.values>`.
Because these resources need to be created before they can be used, they are
sensitive to ordering. The intent behind this tool is to provide an interface
that allows other managers to register resources with the resource manager
and in turn ask for ordered sequences of these resources according to their
dependencies or raise exceptions if this is not possible.
"""
from types import MethodType
from typing import Any, Callable, Iterable, List
import networkx as nx
from loguru import logger
from vivarium.exceptions import VivariumError
class ResourceError(VivariumError):
"""Error raised when a dependency requirement is violated."""
pass
RESOURCE_TYPES = {
"value",
"value_source",
"missing_value_source",
"value_modifier",
"column",
"stream",
}
NULL_RESOURCE_TYPE = "null"
class ResourceGroup:
"""Resource groups are the nodes in the resource dependency graph.
A resource group represents the pool of resources produced by a single
callable and all the dependencies necessary to produce that resource.
When thinking of the dependency graph, this represents a vertex and
all in-edges. This is a local-information representation that can be
used to construct the entire dependency graph once all resources are
specified.
"""
def __init__(
self,
resource_type: str,
resource_names: List[str],
producer: Callable,
dependencies: List[str],
):
self._resource_type = resource_type
self._resource_names = resource_names
self._producer = producer
self._dependencies = dependencies
@property
def type(self) -> str:
"""The type of resource produced by this resource group's producer.
Must be one of `RESOURCE_TYPES`.
"""
return self._resource_type
@property
def names(self) -> List[str]:
"""The long names (including type) of all resources in this group."""
return [f"{self._resource_type}.{name}" for name in self._resource_names]
@property
def producer(self) -> Any:
"""The method or object that produces this group of resources."""
return self._producer
@property
def dependencies(self) -> List[str]:
"""The long names (including type) of dependencies for this group."""
return self._dependencies
def __iter__(self) -> Iterable[str]:
return iter(self.names)
def __repr__(self) -> str:
resources = ", ".join(self)
return f"ResourceProducer({resources})"
def __str__(self) -> str:
resources = ", ".join(self)
return f"({resources})"
class ResourceManager:
"""Manages all the resources needed for population initialization."""
def __init__(self):
        # This will be a dict mapping resource names (string keys) to the
        # resource group they belong to. This is a one-to-many mapping
# as some resource groups contain many resources.
self._resource_group_map = {}
# null producers are those that don't produce any resources externally
# but still consume other resources (i.e., have dependencies) - these
# are only pop initializers as of 9/26/2019. Tracker is here to assign
# them unique ids.
self._null_producer_count = 0
# Attribute used for lazy (but cached) graph initialization.
self._graph = None
# Attribute used for lazy (but cached) graph topo sort.
self._sorted_nodes = None
@property
def name(self) -> str:
"""The name of this manager."""
return "resource_manager"
@property
def graph(self) -> nx.DiGraph:
"""The networkx graph representation of the resource pool."""
if self._graph is None:
self._graph = self._to_graph()
return self._graph
@property
def sorted_nodes(self):
"""Returns a topological sort of the resource graph.
Notes
-----
Topological sorts are not stable. Be wary of depending on order
where you shouldn't.
"""
if self._sorted_nodes is None:
try:
self._sorted_nodes = list(nx.algorithms.topological_sort(self.graph))
except nx.NetworkXUnfeasible:
raise ResourceError(
f"The resource pool contains at least one cycle: "
f"{nx.find_cycle(self.graph)}."
)
return self._sorted_nodes
def add_resources(
self,
resource_type: str,
resource_names: List[str],
producer: Any,
dependencies: List[str],
):
"""Adds managed resources to the resource pool.
Parameters
----------
resource_type
The type of the resources being added. Must be one of
`RESOURCE_TYPES`.
resource_names
A list of names of the resources being added.
producer
A method or object that will produce the resources.
dependencies
A list of resource names formatted as
``resource_type.resource_name`` that the producer requires.
Raises
------
ResourceError
If either the resource type is invalid, a component has multiple
resource producers for the ``column`` resource type, or
there are multiple producers of the same resource.
"""
if resource_type not in RESOURCE_TYPES:
raise ResourceError(
f"Unknown resource type {resource_type}. "
f"Permitted types are {RESOURCE_TYPES}."
)
resource_group = self._get_resource_group(
resource_type, resource_names, producer, dependencies
)
for resource in resource_group:
if resource in self._resource_group_map:
                other_producer = self._resource_group_map[resource].producer
raise ResourceError(
f"Both {producer} and {other_producer} are registered as "
f"producers for {resource}."
)
self._resource_group_map[resource] = resource_group
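    # Hedged usage sketch (producers are stand-in lambdas, names illustrative):
    #
    #   rm = ResourceManager()
    #   rm.add_resources("stream", ["age_jitter"], lambda: None, [])
    #   rm.add_resources("column", ["age"], lambda idx: None,
    #                    ["stream.age_jitter"])
    #   # rm.sorted_nodes then orders the stream group before the column group.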
def _get_resource_group(
self,
resource_type: str,
resource_names: List[str],
producer: MethodType,
dependencies: List[str],
) -> ResourceGroup:
"""Packages resource information into a resource group.
See Also
--------
:class:`ResourceGroup`
"""
if not resource_names:
# We have a "producer" that doesn't produce anything, but
# does have dependencies. This is necessary for components that
# want to track private state information.
resource_type = NULL_RESOURCE_TYPE
resource_names = [str(self._null_producer_count)]
self._null_producer_count += 1
return ResourceGroup(resource_type, resource_names, producer, dependencies)
def _to_graph(self) -> nx.DiGraph:
"""Constructs the full resource graph from information in the groups.
Components specify local dependency information during setup time.
When the resources are required at population creation time,
the graph is generated as all resources must be registered at that
point.
Notes
-----
We are taking advantage of lazy initialization to sneak this in
between post setup time when the :class:`values manager
<vivarium.framework.values.ValuesManager>` finalizes pipeline
dependencies and population creation time.
"""
resource_graph = nx.DiGraph()
# networkx ignores duplicates
resource_graph.add_
|
tensorflow/gan
|
tensorflow_gan/examples/progressive_gan/layers.py
|
Python
|
apache-2.0
| 9,208
| 0.005213
|
# coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for a progressive GAN model.
This module contains basic building blocks to build a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples import compat_utils
def pixel_norm(images, epsilon=1.0e-8):
"""Pixel normalization.
For each pixel a[i,j,k] of image in HWC format, normalize its value to
b[i,j,k] = a[i,j,k] / SQRT(SUM_k(a[i,j,k]^2) / C + eps).
Args:
images: A 4D `Tensor` of NHWC format.
epsilon: A small positive number to avoid division by zero.
Returns:
A 4D `Tensor` with pixel-wise normalized channels.
"""
return images * tf.math.rsqrt(
tf.reduce_mean(input_tensor=tf.square(images), axis=3, keepdims=True) +
epsilon)
def _get_validated_scale(scale):
"""Returns the scale guaranteed to be a positive integer."""
scale = int(scale)
if scale <= 0:
raise ValueError('`scale` must be a positive integer.')
return scale
def downscale(images, scale):
"""Box downscaling of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` down scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.nn_avg_pool2d(
input=images,
ksize=[1, scale, scale, 1],
strides=[1, scale, scale, 1],
padding='VALID')
def upscale(images, scale):
"""Box upscaling (also called nearest neighbors) of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` up scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.batch_to_space(
input=tf.tile(images, [scale**2, 1, 1, 1]),
crops=[[0, 0], [0, 0]],
block_shape=scale)
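# Hedged sanity check for upscale(): box upscaling by 2 matches plain
# nearest-neighbor repetition along both spatial axes (numpy reference):
#
#   img = np.arange(4.).reshape(1, 2, 2, 1)
#   expected = img.repeat(2, axis=1).repeat(2, axis=2)   # == upscale(img, 2)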
def minibatch_mean_stddev(x):
"""Computes the standard deviation average.
This is used by the discriminator as a form of batch discrimination.
Args:
x: A `Tensor` for which to compute the standard deviation average. The first
dimension must be batch size.
Returns:
    A scalar `Tensor` which is the average standard deviation of variable x.
"""
mean, var = tf.nn.moments(x=x, axes=[0])
del mean
return tf.reduce_mean(input_tensor=tf.sqrt(var))
def scalar_concat(tensor, scalar):
"""Concatenates a scalar to the last dimension of a tensor.
Args:
tensor: A `Tensor`.
scalar: a scalar `Tensor` to concatenate to tensor `tensor`.
Returns:
A `Tensor`. If `tensor` has shape [...,N], the result R has shape
[...,N+1] and R[...,N] = scalar.
Raises:
ValueError: If `tensor` is a scalar `Tensor`.
"""
ndims = tensor.shape.ndims
if ndims < 1:
raise ValueError('`tensor` must have number of dimensions >= 1.')
shape = tf.shape(input=tensor)
return tf.concat(
[tensor,
tf.ones([shape[i] for i in range(ndims - 1)] + [1]) * scalar],
axis=ndims - 1)
def he_initializer_scale(shape, slope=1.0):
"""The scale of He neural network initializer.
Args:
shape: A list of ints representing the dimensions of a tensor.
slope: A float representing the slope of the ReLu following the layer.
Returns:
A float of he initializer scale.
"""
fan_in = np.prod(shape[:-1])
return np.sqrt(2. / ((1. + slope**2) * fan_in))
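# Numeric check of the He scale above: a 3x3 kernel with 64 input channels has
# fan_in = 3*3*64 = 576, so with slope=1.0 the scale is sqrt(2 / (2 * 576)):
#
#   he_initializer_scale([3, 3, 64, 128])   # ~= 0.04167 (= 1/24)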
def _custom_layer_impl(apply_kernel, kernel_shape, bias_shape, activation,
he_initializer_slope, use_weight_scaling):
"""Helper function to implement custom_xxx layer.
Args:
apply_kernel: A function that transforms kernel to output.
kernel_shape: An integer tuple or list of the kernel shape.
bias_shape: An integer tuple or list of the bias shape.
activation: An activation function to be applied. None means no activation.
he_initializer_slope: A float slope for the He initializer.
use_weight_scaling: Whether to apply weight scaling.
Returns:
A `Tensor` computed as apply_kernel(kernel) + bias where kernel is a
`Tensor` variable with shape `kernel_shape`, bias is a `Tensor` variable
with shape `bias_shape`.
"""
kernel_scale = he_initializer_scale(kernel_shape, he_initializer_slope)
init_scale, post_scale = kernel_scale, 1.0
if use_weight_scaling:
init_scale, post_scale = post_scale, init_scale
kernel_initializer = tf.random_normal_initializer(stddev=init_scale)
bias = tf.get_variable(
'bias', shape=bias_shape, initializer=tf.zeros_initializer())
output = post_scale * apply_kernel(kernel_shape, kernel_initializer) + bias
if activation is not None:
output = activation(output)
return output
def custom_conv2d(x,
filters,
kernel_size,
strides=(1, 1),
padding='SAME',
activation=None,
he_initializer_slope=1.0,
                  use_weight_scaling=True,
scope='custom_conv2d',
reuse=None):
"""Custom conv2d layer.
  In comparison with tf.layers.conv2d, this implementation uses the He
  initializer to initialize the convolutional kernel and the weight scaling trick (if
`use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor` of NHWC format.
filters: An int of output channels.
kernel_size: An integer or a int tuple of [kernel_height, kernel_width].
strides: A list of strides.
padding: One of "VALID" or "SAME".
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` of NHWC format where the last dimension has size `filters`.
"""
if not isinstance(kernel_size, (list, tuple)):
kernel_size = [kernel_size] * 2
kernel_size = list(kernel_size)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.conv2d(
x,
filters=filters,
kernel_size=kernel_shape[0:2],
strides=strides,
padding=padding,
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=kernel_size + [x.shape.as_list()[3], filters],
bias_shape=(filters,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling)
def custom_dense(x,
units,
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_dense',
reuse=None):
"""Custom dense layer.
  In comparison with tf.layers.dense, this implementation uses the He
initializer to initialize weights and the weight scaling trick
(if `use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `T
|
WorldBank-Transport/open-transit-indicators
|
python/django/datasources/tasks/osm.py
|
Python
|
gpl-3.0
| 4,966
| 0.001611
|
"""Handles downloading and importing OSM Data"""
import os
import subprocess
import tempfile
import requests
from celery.utils.log import get_task_logger
from django.conf import settings
from django.db import connection
from datasources.models import OSMData, OSMDataProblem
from datasources.tasks.shapefile import ErrorFactory
# Note: The download is done using the overpass API
# (see:http://wiki.openstreetmap.org/wiki/Overpass_API) because
# we may be downloading large files and these endpoints are optimized
# for downloads/reads unlike the main openstreetmap API endpoint
OSM_API_URL = 'http://www.overpass-api.de/api/xapi?way[bbox=%s,%s,%s,%s][highway=*]'
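# Example of the template above filled in (bounding-box values illustrative):
#
#   >>> OSM_API_URL % (-75.28, 39.87, -74.96, 40.14)
#   'http://www.overpass-api.de/api/xapi?way[bbox=-75.28,39.87,-74.96,40.14][highway=*]'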
# set up shared task logger
logger = get_task_logger(__name__)
def run_osm_import(osmdata_id):
"""Download and run import step for OSM data
Downloads and stores raw OSM data within a bounding box defined
by imported GTFS data. Uses the SRID defined on the gtfs_stops
table to determine correct UTM projection to import data as.
Uses Raw SQL to
- get extent from GTFS data since we
    do not have models that keep track of GTFS data
- get UTM projection to import OSM data as correct projection
"""
logger.debug('Starting OSM import')
osm_data = OSMData.objects.get(pk=osmdata_id)
osm_data.status = OSMData.Statuses.PROCESSING
error_factory = ErrorFactory(OSMDataProblem, osm_data, 'osmdata')
def handle_error(title, description):
"""Helper method to handle shapefile errors."""
error_factory.error(title, description)
osm_data.status = OSMData.Statuses.ERROR
osm_data.save()
return
with connection.cursor() as c:
try:
# Get the bounding box for gtfs data
# split components to make it easier to parse the sql response
bbox_query = """
SELECT MIN(ST_Xmin(the_geom)),
MIN(ST_Ymin(the_geom)),
MAX(ST_Xmax(the_geom)),
MAX(ST_Ymax(the_geom))
FROM gtfs_stops;"""
logger.debug('Making query for bounding box from gtfs stops')
c.execute(bbox_query)
bbox = c.fetchone()
except Exception as e:
err_msg = 'Error obtaining bounding box from gtfs_stops table'
handle_error(err_msg, e.message)
try:
logger.debug('Making query for UTM projection srid from gtfs_stops table (geom field)')
utm_projection_query = "SELECT FIND_SRID('', 'gtfs_stops', 'geom');"
c.execute(utm_projection_query)
utm_projection = c.fetchone()[0]
except Exception as e:
err_msg = 'Error obtaining SRID from gtfs_stops table'
logger.exception(err_msg)
handle_error(err_msg, e.message)
_, temp_filename = tempfile.mkstemp()
    logger.debug('Generated tempfile %s to download osm data into', temp_filename)
osm_data.source_file = temp_filename
osm_data.status = OSMData.Statuses.DOWNLOADING
osm_data.save()
try:
response = requests.get(OSM_API_URL % bbox, stream=True)
logger.debug('Downloading OSM data from overpass/OSM api')
# Download OSM data
with open(temp_filename, 'wb') as fh:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
fh.write(chunk)
fh.flush()
logger.debug('Finished downloading OSM data')
osm_data.status = OSMData.Statuses.IMPORTING
osm_data.save()
except Exception as e:
err_msg = 'Error downloading data'
logger.exception('Error downloading data')
handle_error(err_msg, e.message)
# Get Database settings
db_host = settings.DATABASES['default']['HOST']
db_password = settings.DATABASES['default']['PASSWORD']
db_user = settings.DATABASES['default']['USER']
db_name = settings.DATABASES['default']['NAME']
env = os.environ.copy()
env['PGPASSWORD'] = db_password
# Insert OSM Data into Database with osm2pgsql command
osm2pgsql_command = ['osm2pgsql',
'-U', db_user,
'-H', db_host,
'-d', db_name,
'-s', # use slim mode to cache to DB rather than in-memory
'-E', str(utm_projection),
temp_filename]
try:
logger.debug('Running OSM import command %s', ' '.join(osm2pgsql_command))
subprocess.check_call(osm2pgsql_command, env=env)
osm_data.status = OSMData.Statuses.COMPLETE
except subprocess.CalledProcessError as e:
osm_data.status = OSMData.Statuses.ERROR
err_msg = 'Error running osm2pgsql command'
logger.exception('Error running osm2pgsql command')
error_factory.error(err_msg, e.message)
finally:
osm_data.save()
os.remove(temp_filename)
|