repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
youtube/cobalt | third_party/v8/tools/release/PRESUBMIT.py | Python | bsd-3-clause | 378 | 0.005291 | # Copyright 2017 the V8 | project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def CheckChangeOnCommit(input_api, output_api):
tests = input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, '.', files_to_check=['test_sc | ripts.py$'])
return input_api.RunTests(tests)
|
staranjeet/fjord | fjord/search/tests/test_index.py | Python | bsd-3-clause | 1,072 | 0 | from fjord.base.tests import TestCase
from fjord.feedback.models import ResponseDocType
from fjord.feedback.tests import ResponseFactory
from fjord.search.index import chunked
from fjord.search.tests import ElasticTestCase
class ChunkedTests(TestCase):
def test_chunked(self):
# chunking nothing yields nothing.
assert list(chunked([], 1)) == []
# chunking list where len(list) < n
assert list(chunked([1], 10)) == [(1,)]
# chunking a list where len(list) == n
assert list(chunked([1, 2], 2)) == | [(1, 2) | ]
# chunking list where len(list) > n
assert list(chunked([1, 2, 3, 4, 5], 2)) == [(1, 2), (3, 4), (5,)]
class TestLiveIndexing(ElasticTestCase):
def test_live_indexing(self):
search = ResponseDocType.docs.search()
count_pre = search.count()
s = ResponseFactory(happy=True, description='Test live indexing.')
self.refresh()
assert count_pre + 1 == search.count()
s.delete()
self.refresh()
assert count_pre == search.count()
|
androbwebb/JenniferVirtualAssistant | lessons/base/decorators.py | Python | mit | 196 | 0.005102 | def intent(func):
"""
This is us | ed inside the intent parser in `lessons.base.plugin._in | tent_dictionary()`
:param func:
:return:
"""
func.decorator = intent
return func
|
devsim/devsim | testing/noise_res.py | Python | apache-2.0 | 2,229 | 0.004935 | # Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law o | r agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# solid state resistor ssac
import devsim
import res1
import test_common
devsim.circuit_element(name="V1", n1="topbias", n2=0, acreal=1.0)
test_common.CreateNoiseMesh(res1.device, res1.region)
devsim.set_paramete | r(name="botbias", value=0.0)
res1.run_initial_bias(use_circuit_bias=True, net_doping=1e17)
for v in (0.0, 1e-3):
devsim.circuit_alter(name="V1", value=v)
devsim.solve(type="dc", absolute_error=1e10, relative_error=1e-7, maximum_iterations=30)
for contact in res1.contacts:
test_common.printResistorCurrent(device=res1.device, contact=contact)
devsim.solve(type="dc", absolute_error=1e10, relative_error=1e-7, maximum_iterations=30)
devsim.solve(type="noise", frequency=1e5, output_node="V1.I")
devsim.vector_gradient(device=res1.device, region=res1.region, node_model="V1.I_ElectronContinuityEquation_real", calc_type="avoidzero")
for name in (
"V1.I_ElectronContinuityEquation_real",
"V1.I_ElectronContinuityEquation_imag",
"V1.I_ElectronContinuityEquation_real_gradx",
"V1.I_ElectronContinuityEquation_imag_gradx",
):
devsim.print_node_values(device=res1.device, region=res1.region, name=name)
rv="V1.I_ElectronContinuityEquation_real_gradx"
iv="V1.I_ElectronContinuityEquation_imag_gradx"
for name, equation in (
("noisesource", "4*ElectronCharge^2 * ThermalVoltage * mu_n * Electrons"),
("vfield", "(%(rv)s*%(rv)s+%(iv)s*%(iv)s)" % {'rv' : rv, 'iv' : iv}),
("noise", "vec_sum(vfield * noisesource * NodeVolume)"),
):
devsim.node_model(device=res1.device, region=res1.region, name=name, equation=equation)
devsim.print_node_values(device=res1.device, region=res1.region, name=name)
|
ginabythebay/iddocs | apache_auth/management/commands/rewrite_htpasswd.py | Python | apache-2.0 | 618 | 0.001618 | from django.core.management.base import BaseCommand, CommandError
from apache_auth.middleware import rewrite_htpasswd
class Command(BaseCommand):
help = 'Rewrites the htpass | wd file'
def handle(self, *args, **options):
written_users = rewrite_htpasswd()
verbosity = o | ptions['verbosity']
if verbosity >= 2:
self.stdout.write('Wrote the following users:')
for u in written_users:
self.stdout.write(' %s' % u)
if verbosity >= 1:
self.stdout.write(self.style.SUCCESS(
'%s users written' % len(written_users)))
|
zhaochl/python-utils | random_simulators/simulate_user.py | Python | apache-2.0 | 1,284 | 0.017134 | #!/usr/bin/env python
# coding=utf-8
from constant import *
from random import *
from simulate_util import *
from pdb import *
def rd_cn_name():
_name =''
xing_index = randint(0,98)
#print xing_index
xing = CONST_XING[xing_index]
#print xing
_name = xing
for i in range(randint(1,2)):
ming = random_chinese_str()
_name+=ming
return _name
def gen_name():
name=''
x = randint(0,98)
xing = CONST_XING[x]
ming_list = CONST_MING_MAN+CONST_MING_WOMEN
m = randint(0,len(ming_list)-1)
ming = ming_list[m]
name+=xing+ming
return name
def rd_en_name(randomlength=8):
str = ''
chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789_'
length = len(chars) - 1
random = Random()
for i in range(randomlength):
str+=chars[random.randint(0, length)]
return str
def rd_user_action():
todo_len = len(CONST_ACTION_LIST)
action_index = randint(0,todo_len-1)
return CONST_AC | TION_LIST[action_index]
if __name__=='__main__':
#print 'main'
num=5
for i in range(n | um):
#cn_name= rd_cn_name()
cn_name= gen_name()
en_name = rd_en_name()
action = rd_user_action()
r = cn_name+","+"("+en_name+")"+action
print r
|
squilter/MAVProxy | MAVProxy/modules/mavproxy_smartcamera/__init__.py | Python | gpl-3.0 | 16,290 | 0.006691 | #!/usr/bin/env python
#***************************************************************************
# Copyright Jaime Machuca
#***************************************************************************
# Title : mavproxy_smartcamera.py
#
# Description : This file is intended to be added as a module to MAVProxy,
# it is intended to be used to control smart cameras that are
# connected to a companion computer. It reads MAVlink commands
# and uses them to control the cameras attached. The module
# reads a configuration file called smart_camera.cnf that tells
# it what cameras are connected, it then tries to connect to the
# cameras and populates a list of available cameras.
#
# Environment : Python 2.7 Code. Intended to be included in MAVproxy as a Module
#
# Responsible : Jaime Machuca
#
# License : CC BY-NC-SA
#
# Editor Used : Xcode 6.1.1 (6A2008a)
#
#****************************************************************************
#****************************************************************************
# HEADER-FILES (Only those that are needed in this file)
#****************************************************************************
# System Header files and Module Headers
import time, math, sched
# Module Dependent Headers
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib.mp_settings import MPSetting
# Own Headers
from sc_webcam import SmartCameraWebCam
from sc_SonyQX1 import SmartCamera_SonyQX
import sc_config
#****************************************************************************
# LOCAL DEFINES
#****************************************************************************
#****************************************************************************
# Class name : SmartCameraModule
#
# Public Methods : init
# mavlink_packet
#
# Private Methods : __vRegisterCameras
# __vCmdCamTrigger
#
#****************************************************************************
class SmartCameraModule(mp_module.MPModule):
#****************************************************************************
# Method Name : __init__ Class Initializer
#
# Description : Initializes the class
#
# Parameters : mpstate
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def __init__(self, mpstate):
super(SmartCameraModule, self).__init__(mpstate, "SmartCamera", "SmartCamera commands")
self.add_command('camtrigger', self.__vCmdCamTrigger, "Trigger camera")
self.add_command('connectcams', self.__vCmdConnectCameras, "Connect to Cameras")
self.add_command('setCamISO', self.__vCmdSetCamISO, "Set Camera ISO")
self.add_command('setCamAperture', self.__vCmdSetCamAperture, "Set Camera Aperture")
self.add_command('setCamShutterSpeed', self.__vCmdSetCamShutterSpeed, "Set Camera Shutter Speed")
self.add_command('setCamExposureMode', self.__vCmdSetCamExposureMode, "Set Camera Exposure Mode")
self.CamRetryScheduler = sched.scheduler(time.time, time.sleep)
self.ProgramAuto = 1
self.Aperture = 2
self.Shutter = 3
self.Manual = 4
self.IntelligentAuto = 5
self.SuperiorAuto = 6
self.WirelessPort = sc_config.config.get_string("general", 'WirelessPort', "wlan0")
| self.u8RetryTimeout = 0
self.u8MaxRetries = 5
self.__vRegisterCameras()
#****************************************************************************
# Method Name : __vRegisterQXCamera
#
# Description : Tries to connect to a QX camera on the specified Wireless
# port. If no camera is found it will retry every 5 seconds
# until u8MaxRetries is reached.
#
# Parameters : None
#
# Return Value : None
#
# | Author : Jaime Machuca
#
#****************************************************************************
def __vRegisterQXCamera(self,u8CamNumber):
if (self.u8RetryTimeout < self.u8MaxRetries):
new_camera = SmartCamera_SonyQX(u8CamNumber, self.WirelessPort)
if new_camera.boValidCameraFound() is True:
self.camera_list = self.camera_list + [new_camera]
print("Found QX Camera")
else:
print("No Valid Camera Found, retry in 5 sec")
self.u8RetryTimeout = self.u8RetryTimeout + 1
self.CamRetryScheduler.enter(5, 1, self.__vRegisterQXCamera, [u8CamNumber])
self.CamRetryScheduler.run()
else:
print("Max retries reached, No QX Camera Found")
self.u8RetryTimeout = 0
#****************************************************************************
# Method Name : __vRegisterCameras
#
# Description : Creates camera objects based on camera-type configuration
#
# Parameters : None
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def __vRegisterCameras(self):
# initialise list
self.camera_list = []
#look for up to 2 cameras
for i in range(0,2):
config_group = "camera%d" % i
camera_type = sc_config.config.get_integer(config_group, 'type', 0)
# webcam
if camera_type == 1:
new_camera = SmartCameraWebCam(i)
self.camera_list = self.camera_list + [new_camera]
# Sony QX1
if camera_type == 2:
self.__vRegisterQXCamera(i)
# display number of cameras found
print ("cameras found: %d" % len(self.camera_list))
#****************************************************************************
# Method Name : __vCmdCamTrigger
#
# Description : Triggers all the cameras and stores Geotag information
#
# Parameters : None
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def __vCmdCamTrigger(self, args):
'''Trigger Camera'''
#print(self.camera_list)
for cam in self.camera_list:
cam.take_picture()
print("Trigger Cam %s" % cam)
#****************************************************************************
# Method Name : __vCmdConnectCameras
#
# Description : Initiates connection to cameras
#
# Parameters : None
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def __vCmdConnectCameras(self, args):
'''ToDo: Validate the argument as a valid port'''
if len(args) >= 1:
self.WirelessPort = args[0]
print ("Connecting to Cameras on %s" % self.WirelessPort)
self.__vRegisterCameras()
#****************************************************************************
# Method Name : __vCmdSetCamExposureMode
#
# Description : Sets the camera exposure mode
#
# Parameters : Exposure Mode, Cam number
# Valid values are Program Auto, Aperture, Shutter, Manual
# Intelligent Auto, Superior Auto
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def __vCmdSetCamExposureMode(self, args):
'''ToDo: Validate CAM number and Valid Mode Values'''
if len(args) == 1:
for cam in self.camera_list:
cam.boSetExposureMode(args[0])
elif len(args) == 2:
cam = self.camera_list[int(args[1])]
cam.boSetExposureMode(args[0])
else:
print ("Usage: setCamExposureMode MODE [CAMNUMBER], Valid values for MODE: Program Auto, Aperture, Shutter, Manual, Intelligent Auto, Superior Auto")
#** |
takeshineshiro/horizon | openstack_dashboard/api/rest/__init__.py | Python | apache-2.0 | 1,343 | 0.006701 | # Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limita | tions under the License.
"""This package holds the REST API that supports the Horizon dashboard
Javascript code.
It is not intended to be used outside of Horizon, and makes no promises of
stability or fitness for purpose outside of that scope.
It does not promise to adhere to the general OpenStack API Guidelines set out
in https://wiki.openstack.org/wiki/APIChangeGuidelines.
"""
# import REST API modules here
from . import cinder #fla | ke8: noqa
from . import config #flake8: noqa
from . import glance #flake8: noqa
from . import heat #flake8: noqa
from . import keystone #flake8: noqa
from . import network #flake8: noqa
from . import neutron #flake8: noqa
from . import nova #flake8: noqa
from . import policy #flake8: noqa
|
mahajrod/MACE | MACE/Routines/Drawing.py | Python | apache-2.0 | 49,658 | 0.005981 | #!/usr/bin/env python
import os
import math
from collections import Iterable, OrderedDict
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
from matplotlib.patches import Rectangle
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import cm
from matplotlib import colors
from matplotlib import text
from RouToolPa.GeneralRoutines import FileRoutines
from MACE.Functions.Generators import recursive_generator
from RouToolPa.Collections.General import TwoLvlDict
class DrawingRoutines:
@staticmethod
def millions(x, pos):
return '%1.1fMbp' % (x*1e-6)
@staticmethod
def billions(x, pos):
return '%1.1fGbp' % (x*1e-9)
@staticmethod
def get_filtered_scaffold_list(count_dict,
scaffold_black_list=[],
sort_scaffolds=False,
scaffold_ordered_list=None,
scaffold_white_list=[],
sample_level=True):
white_set = set(scaffold_white_list)
black_set = set(scaffold_black_list)
scaffold_set = set()
if sample_level:
for sample in count_dict:
scaffold_set |= set(count_dict[sample])
else:
scaffold_set |= set(count_dict)
if white_set:
#print "XXXXXXXXXXXXXX"
#print scaffold_set
#print "YYYYYYYYYYYYYYYY"
#print white_set
scaffold_set = scaffold_set & white_set
#print "ZZZZZZZZZZZZZZZZ"
#print scaffold_set
if black_set:
#print "WWWWWWWWWWWW"
#print black_set
scaffold_set = scaffold_set - black_set
#print "QQQQQQQQQQQQQQQQ"
#print black_set
#print "RRRRRRRRRRRRRRRRRR"
#print scaffold_set
scaffold_l | ist = list(scaffold_set)
#print "PPPPPPPPPPP"
#print scaffold_list
if sort_scaffolds:
scaffold_list.sort()
final_scaffold_list = []
if scaffold_ordered_list:
for en | try in scaffold_ordered_list:
if entry in scaffold_list:
final_scaffold_list.append(entry)
scaffold_list.remove(entry)
else:
print("WARNING!!!Entry(%s) from order list is absent in list of scaffolds!" % entry)
final_scaffold_list = final_scaffold_list + scaffold_list
else:
final_scaffold_list = scaffold_list
return final_scaffold_list
def draw_variant_window_densities(self, count_dict, scaffold_length_dict, window_size, window_step, output_prefix,
masking_dict=None,
gap_fraction_threshold=0.4,
record_style=None, ext_list=("svg", "png"),
label_fontsize=13, left_offset=0.2, figure_width=12,
figure_height_scale_factor=0.5, scaffold_synonym_dict=None,
id_replacement_mode="partial", suptitle=None, density_multiplicator=1000,
scaffold_black_list=[], sort_scaffolds=False, scaffold_ordered_list=None,
scaffold_white_list=[], add_sample_name_to_labels=False,
dist_between_scaffolds_scaling_factor=1,
gap_color="grey",
masked_color="grey", no_snp_color="white",
colormap=None,
colors=("#333a97", "#3d3795","#5d3393", "#813193", "#9d2d7f", "#b82861",
"#d33845", "#ea2e2e", "#f5ae27"),
thresholds=(0.0, 0.1, 0.5, 0.75, 1.0, 1.25, 1.5, 2.0, 2.5),
colormap_tuple_list=((0.0, "#333a97"), (0.1, "#3d3795"), (0.5, "#5d3393"),
(0.75, "#813193"), (1.0, "#9d2d7f"), (1.25, "#b82861"),
(1.5, "#d33845"), (2.0, "#ea2e2e"), (2.5, "#f5ae27"))):
""" cont_dict = {sample: {scaffold: }}"""
if dist_between_scaffolds_scaling_factor < 1:
raise ValueError("Scaling factor for distance between scaffolds have to be >=1.0")
final_scaffold_list = self.get_filtered_scaffold_list(count_dict,
scaffold_black_list=scaffold_black_list,
sort_scaffolds=sort_scaffolds,
scaffold_ordered_list=scaffold_ordered_list,
scaffold_white_list=scaffold_white_list)
scaffold_number = len(final_scaffold_list)
max_scaffold_length = max([scaffold_length_dict[scaf] for scaf in final_scaffold_list])
#max_scaffold_length = max(scaffold_length_dict.values())
figure = plt.figure(figsize=(figure_width,
int(figure_height_scale_factor * scaffold_number * len(count_dict))))
subplot = plt.subplot(1, 1, 1)
subplot.get_yaxis().set_visible(False)
#subplot.get_xaxis().set_visible(False)
#axes.xaxis.set_major_formatter(x_formatter)
#subplot.spines['bottom'].set_color('none')
subplot.spines['right'].set_color('none')
subplot.spines['left'].set_color('none')
subplot.spines['top'].set_color('none')
scaffold_height = 10
dist_between_scaffolds = 5
start_x = 0
start_y = - dist_between_scaffolds
label_line_y_shift = int(scaffold_height/2)
label_line_y_jump = int(scaffold_height/2)
#normalize_color_func = LinearSegmentedColormap.from_list("Densities_custom", colormap_tuple_list)
#plt.register_cmap(cmap=colormap)
#colormap = cm.get_cmap(name="plasma", lut=None)
#normalize_colors = colors.BoundaryNorm(boundaries_for_colormap, len(boundaries_for_colormap) - 1) * int(256/(len(boundaries_for_colormap) - 1))
#normalize_colors = colors.Normalize(vmin=boundaries_for_colormap[0], vmax=boundaries_for_colormap[-1])
masked_windows_count_dict = TwoLvlDict()
no_snps_windows_count_dict = TwoLvlDict()
for sample in count_dict:
masked_windows_count_dict[sample] = OrderedDict()
no_snps_windows_count_dict[sample] = OrderedDict()
if colormap:
cmap = plt.get_cmap(colormap, len(thresholds))
masked_regions_fd = open("%s.masked_regions" % output_prefix, "w")
masked_regions_fd.write("#scaffold\twindow\tmasked_position\tmasked_position,fraction\n")
for scaffold in final_scaffold_list:
sample_index = 0
for sample in count_dict:
masked_windows_count_dict[sample][scaffold] = 0
no_snps_windows_count_dict[sample][scaffold] = 0
#if scaffold in scaffold_black_list:
# continue
#print gap_coords_list, gap_len_list
start_y += scaffold_height + dist_between_scaffolds * (dist_between_scaffolds_scaling_factor if sample_index == 0 else 1)
label_y_start = label_line_y_shift + start_y
gap_y_jump = label_y_start + label_line_y_jump
prev_x = 0
#figure.text(0, start_y, scaffold, rotation=0, fontweight="bold", transform=subplot.transAxes, fontsize=9,
# horizontalalignment='center',
# verticalalignment='center')
if scaffold_synonym_dict:
if id_replacement_mode == "exact":
if scaffold in scaffold_synonym_dict:
scaffold_label = scaffold_ |
kyelewisstgc/EventMaster-Python | tests/test_offline.py | Python | mit | 4,308 | 0.003714 | #Import Libraries
import eventmaster as EM
from time import sleep
import random
import sys
""" Create new Instance of EventMasterSwitcher and turn off logging """
s3 = EM.EventMasterSwitcher()
s3.setVerbose(0)
with open('example_settings_.xml', 'r') as content_file:
content = content_file.read()
s3.loadFromXML(content)
""" Enumerate all Inputs and print known information for each """
print("# Inputs")
for input_id, input_inst in s3.getInputs().items():
input_name = input_inst.getName()
frozen_string = "is Frozen" if input_inst.getFreeze() else "is not Frozen"
print(" ({0!s}) {1!s} {2!s}".format(input_id, input_name, frozen_string))
""" Enumerate all Outputs and print known information for each """
print("\r\n# Outputs")
for output_id, output_inst in s3.getOutputs().items():
output_name = output_inst.getName()
print(" ({0!s}) {1!s}".format(output_id, output_name))
""" Enumerate all Presets and print known information for each """
print("\r\n# Presets")
for preset_id, preset_inst in s3.getPresets().items():
preset_name = preset_inst.getName()
print(" ({0!s}) {1!s}".format(preset_id, preset_name))
""" Enumerate all Destinations and print known information for each """
print("\r\n# Destinations:")
for dest_id, dest_inst in s3.getScreenDests().items():
dest_numoflayers = len(dest_inst.getLayers())
dest_name = dest_inst.getName()
dest_size = dest_inst.getSize()
print("\n {1!s} is {2!s} x {3!s} & has {4!s} layer(s)".format( dest_id,
dest_name,
dest_size["HSize"],
dest_size["VSize"],
dest_numoflayers))
""" Enumerate all Layers for Destination and print known information for each """
for layer_number, layer_inst in dest_inst.getLayers().items():
if "Pvw" in layer_inst:
layer_name_pvw = layer_inst["Pvw"].getName()
layer_owin_pvw = layer_inst["Pvw"].getOWIN()
layer_hpos_pvw = layer_owin_pvw["HPos"]
layer_hsize_pvw = layer_owin_pvw["HSize"]
layer_vpos_pvw = layer_owin_pvw["VPos"]
layer_vsize_pvw = layer_owin_pvw["VSize"]
if layer_inst["Pvw"].getSource() is not None:
layer_source_name_pvw = layer_inst["Pvw"].getSource().getName()
else:
layer_source_name_pvw = "(Unknown)"
size_string_pvw = " {4!s} is on PVW - {0!s}x{1!s} at {2!s},{3!s}".format(layer_hsize_pvw, layer_vsize_pvw, layer_hpos_pvw, layer_vpos_pvw, layer_name_pvw)
source_string_pvw = " current source is {0!s}".format(layer_source_name_pvw)
else:
size_string_pvw = " Layer is not on PVW "
source_string_pvw = ""
if "Pgm" in layer_inst:
layer_name_pgm = layer_inst["Pgm"].getName()
layer_owin_pgm = layer_inst["Pgm"].getOWIN()
layer_hpos_pgm = layer_owin_pgm["HPos"]
layer_hsize_pgm = layer_owin_pgm["HSize"]
layer_vpos_pgm = layer_owin_pgm["VPos"]
layer_vsize_pgm = layer_owin_pgm["VSize"]
if layer_inst["Pgm"].getSource() is not None:
layer_source_name_pgm = layer_inst["Pgm"].getSource().getName()
else:
layer_source_name_pgm = "(Unknown)"
size_string_pgm = " {4!s} is on PGM - {0!s}x{1!s} at {2!s},{3!s}".format(layer_hsize_pgm, layer_vsize_pgm, layer_hpos_pgm, layer_vpos_pgm, layer_name_pgm)
source_string_pgm = " current source is {0!s}".format(layer_source_name_pgm)
else:
size_ | string_pgm = " Layer is not on PGM "
source_string_p | gm = ""
size_string = " {4!s} is on PGM - {0!s}x{1!s} at {2!s},{3!s}".format(layer_hsize_pgm, layer_vsize_pgm, layer_hpos_pgm, layer_vpos_pgm, layer_name_pgm)
source_string = " current source is {0!s}".format(layer_source_name_pgm)
print(" ({0!s}) {1!s}\n {2!s}\n {3!s}\n {4!s}".format(layer_number+1, size_string_pgm, source_string_pgm, size_string_pvw, source_string_pvw))
sys.exit()
|
simonspa/django-datacollect | datacollect/survey/migrations/0045_record_coords.py | Python | gpl-3.0 | 498 | 0.002008 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-21 15:44
from __future__ import unicode_literals
from django.db import migrations
import djgeojson.fields
class Migration( | migrations.Migration):
dependencies = [
('survey', '0044_auto_20160618_1412'),
]
operations = [
migrations.AddField(
model_name='record',
name='coords',
fie | ld=djgeojson.fields.PointField(blank=True, editable=False, null=True),
),
]
|
shivupa/pyci | methods/misc/fcidump.py | Python | gpl-3.0 | 2,153 | 0.020437 | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from functools import reduce
import numpy
from pyscf imp | ort gto, scf, ao2mo
from pyscf import tools
from pyscf import symm
'''
Write FCIDUMP file
'''
mol = gto.M(
atom = [['H', 0, 0, i] for i in range(6)],
basis = '6-31g',
verbose = 0,
symmetry = 1,
)
myhf = scf.RHF(mol)
myhf.kernel()
c = myhf.mo_coeff
#
# FCIDUMP for given 1e and 2e integrals
#
h1e = reduce(numpy.dot, (c.T, myhf.get_hcore(), c))
eri = ao2mo.kernel(mol, c)
tools.fcidump.fro | m_integrals('fcidump.example1', h1e, eri, c.shape[1],
mol.nelectron, ms=0)
print "shit",h1e
print "shit", eri
print "shit", c.shape[1]
#
# Bypass small matrix elements in FCIDUMP
#
tools.fcidump.from_integrals('fcidump.example2', h1e, eri, c.shape[1],
mol.nelectron, ms=0, tol=1e-10)
#
# Inculde the symmetry information in FCIDUMP
#
# to write the irreps for each orbital, first use pyscf.symm.label_orb_symm to
# get the irrep ids
MOLPRO_ID = {'D2h': { 'Ag' : 1,
'B1g': 4,
'B2g': 6,
'B3g': 7,
'Au' : 8,
'B1u': 5,
'B2u': 3,
'B3u': 2},
'C2v': { 'A1' : 1,
'A2' : 4,
'B1' : 2,
'B2' : 3},
'C2h': { 'Ag' : 1,
'Bg' : 4,
'Au' : 2,
'Bu' : 3},
'D2' : { 'A ' : 1,
'B1' : 4,
'B2' : 3,
'B3' : 2},
'Cs' : { "A'" : 1,
'A"' : 2},
'C2' : { 'A' : 1,
'B' : 2},
'Ci' : { 'Ag' : 1,
'Au' : 2},
'C1' : { 'A' : 1,}}
orbsym = [MOLPRO_ID[mol.groupname][i]
for i in symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, c)]
tools.fcidump.from_integrals('fcidump.example3', h1e, eri, c.shape[1],
mol.nelectron, ms=0, orbsym=orbsym)
|
RomanZacharia/pyethapp | pyethapp/tests/test_genesis.py | Python | mit | 1,593 | 0.001255 | from pprint import pprint
import pytest
from ethereum import blocks
from ethereum.db import DB
from ethereum.config import Env
from pyethapp.utils import merge_dict
from pyethapp.config import update_config_from_genesis_json
import pyethapp.config as konfig
from pyethapp.profiles import PROFILES
def test_genesis_config():
"test setting genesis alloc using the config"
alloc = {'1' * 40: {'wei': 1}, # wei
'2' * 40: {'balance': 2}, # balance
'3' * 20: {'balance': 3}, # 20 bytes
}
config = dict(eth=dict(genesis=dict(alloc=alloc)))
konfig.update_config_with_defaults(config, {'eth': {'block': blocks.default_config}})
# Load genesis config
update_config_from_genesis_json(config, config['eth']['genesis'])
bc = config['eth']['block']
pprint(bc)
env = Env(DB(), bc)
genesis = blocks.genesis(env)
for address, value_dict in alloc.items():
value = value_dict.values()[0]
assert genesis.get_balanc | e(address) == value
@pytest.mark.parametrize('profile', PROFILES.keys())
def test_profile(profile):
config = dict( | eth=dict())
konfig.update_config_with_defaults(config, {'eth': {'block': blocks.default_config}})
# Set config values based on profile selection
merge_dict(config, PROFILES[profile])
# Load genesis config
update_config_from_genesis_json(config, config['eth']['genesis'])
bc = config['eth']['block']
pprint(bc)
env = Env(DB(), bc)
genesis = blocks.genesis(env)
assert genesis.hash.encode('hex') == config['eth']['genesis_hash']
|
travelbird/validictory | validictory/tests/test_type.py | Python | mit | 4,940 | 0 | from unittest import TestCase
from decimal import Decimal
import datetime
import sys
if sys.version_info[0] == 3:
unicode_str = '\u2603'
else:
unicode_str = unicode('snowman')
import validictory
class TestType(TestCase):
def test_schema(self):
schema = {
"type": [
{"type": "array", "minItems": 10},
{"type": "string", "pattern": "^0+$"}
]
}
data1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data2 = "0"
data3 = 1203
for x in [data1, data2]:
try:
validictory.validate(x, schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
self.assertRaises(ValueError, validictory.validate, data3, schema)
def _test_type(self, typename, valids, invalids):
for x in valids:
try:
validictory.validate(x, {"type": typename})
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
for x in invalids:
self.assertRaises(ValueError, validictory.validate, x,
{"type": typename})
def test_integer(self):
valid_ints = [1, -89, 420000]
invalid_ints = [1.2, "bad", {"test": "blah"}, [32, 49], None, True]
self._test_type('integer', valid_ints, invalid_ints)
def test_string(self):
valids = ["abc", unicode_str]
invalids = [1.2, 1, {"test": "blah"}, [32, 49], None, True]
self._test_type('string', valids, invalids)
def test_number(self):
valids = [1.2, -89.42, 48, -32, Decimal('25.25')]
invalids = ["bad", {"test": "blah"}, [32.42, 494242], None, True]
self._test_type('number', valids, invalids)
def test_boolean(self):
valids = [True, False]
invalids = [1.2, "False", {"test": "blah"}, [32, 49], None, 1, 0]
self._test_type('boolean', valids, invalids)
def test_object(self):
valids = [{"blah": "test"}, {"this": {"blah": "test"}}, {1: 2, 10: 20}]
invalids = [1.2 | , "bad", 123, [32, 49], None, True]
| self._test_type('object', valids, invalids)
def test_array(self):
valids = [[1, 89], [48, {"test": "blah"}, "49", 42], (47, 11)]
invalids = [1.2, "bad", {"test": "blah"}, 1234, None, True]
self._test_type('array', valids, invalids)
def test_null(self):
valids = [None]
invalids = [1.2, "bad", {"test": "blah"}, [32, 49], 1284, True]
self._test_type('null', valids, invalids)
def test_any(self):
valids = [1.2, "bad", {"test": "blah"}, [32, 49], None, 1284, True]
self._test_type('any', valids, [])
def test_default(self):
# test default value (same as any really)
valids = [1.2, "bad", {"test": "blah"}, [32, 49], None, 1284, True]
for x in valids:
try:
validictory.validate(x, {})
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_multi(self):
types = ["null", "integer", "string"]
valids = [None, 42, "string"]
invalids = [1.2, {"test": "blah"}, [32, 49], True]
self._test_type(types, valids, invalids)
self._test_type(tuple(types), valids, invalids)
class TestDisallow(TestType):
def _test_type(self, typename, valids, invalids):
for x in invalids:
try:
validictory.validate(x, {"disallow": typename})
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
for x in valids:
self.assertRaises(ValueError, validictory.validate, x,
{"disallow": typename})
class DateValidator(validictory.validator.SchemaValidator):
def validate_type_date(self, value):
return isinstance(value, datetime.date)
def validate_type_datetime(self, value):
return isinstance(value, datetime.datetime)
class TestCustomType(TestCase):
def test_date(self):
self._test_type('date', [datetime.date.today()],
[2010, '2010'])
def test_datetime(self):
self._test_type('datetime', [datetime.datetime.now()],
[2010, '2010', datetime.date.today()])
def test_either(self):
self._test_type(['datetime', 'date'],
[datetime.date.today(), datetime.datetime.now()],
[2010, '2010'])
def _test_type(self, typename, valids, invalids):
validator = DateValidator()
for x in valids:
try:
validator.validate(x, {"type": typename})
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
for x in invalids:
self.assertRaises(ValueError, validator.validate, x,
{"type": typename})
|
Blub/pkgdepdb | pypkgdepdb/__init__.py | Python | bsd-3-clause | 29,250 | 0.005573 | import ctypes
from .common import *
from . import functions
from .functions import p_cfg, p_db, p_pkg, p_elf
from .utils import *
SONAME = 'libpkgdepdb.so.1'
class LogLevel(object):
Debug = 0
Message = 1
Print = 2
Warn = 3
Error = 4
class PkgEntry(object):
Depends = 0
OptDepends = 1
MakeDepends = 2
CheckDepends = 3
Provides = 4
Conflicts = 5
Replaces = 6
class JSON(object):
Query = 1
DB = 2
class ELF(object):
CLASSNONE = 0
CLASS32 = 1
CLASS64 = 2
DATANONE = 0
DATA2LSB = 1
DATA2MSB = 2
OSABI_NONE = 0
OSABI_HPUX = 1
OSABI_NETBSD = 2
OSABI_LINUX = 3
OSABI_HURD = 4
OSABI_86OPEN = 5
OSABI_SOLARIS = 6
OSABI_AIX = 7
OSABI_IRIX = 8
OSABI_FREEBSD = 9
OSABI_TRU64 = 10
OSABI_MODESTO = 11
OSABI_OPENBSD = 12
OSABI_OPENVMS = 13
OSABI_NSK = 14
OSABI_AROS = 15
OSABI_ARM = 97
OSABI_STANDALONE = 255
OSABI_SYSV = 0 # ELFOSABI_NONE
OSABI_MONTEREY = 7 # ELFOSABI_AIX
class lib(object):
pass
rawlib = ctypes.CDLL(SONAME, mode=ctypes.RTLD_GLOBAL)
if rawlib is None:
raise PKGDepDBException('failed to open %s' % SONAME)
functions.load(rawlib, lib, functions.pkgdepdb_functions, 'pkgdepdb_')
class Config(object):
def __init__(self):
self._ptr = lib.cfg_new()
if self._ptr is None:
raise PKGDepDBException('failed to create config instance')
def __del__(self):
lib.cfg_delete(self._ptr)
self._ptr = None
def load(self, filepath):
path = filepath.encode('utf-8')
if lib.cfg_load(self._ptr, path) == 0:
raise PKGDepDBException('failed to load from: %s' % (filepath))
def load_default(self):
if lib.cfg_load_default(self._ptr) == 0:
raise PKGDepDBException('failed to load default config')
def read(self, name, data):
data = cstr(data)
if lib.cfg_read(self._ptr, cstr(name), data, len(data)) != 1:
raise PKGDepDBException('failed to parse config')
database = StringProperty(lib.cfg_database, lib.cfg_set_database)
verbosity = IntProperty(lib.cfg_verbosity, lib.cfg_set_verbosity)
log_level = IntProperty(lib.cfg_log_level, lib.cfg_set_log_level)
max_jobs = IntProperty(lib.cfg_max_jobs, lib.cfg_set_max_jobs)
json = IntProperty(lib.cfg_json, lib.cfg_set_json)
quiet = BoolProperty(lib.cfg_quiet, lib.cfg_set_quiet)
package_depends = BoolProperty(lib.cfg_package_depends,
lib.cfg_set_package_depends)
package_file_lists = BoolProperty(lib.cfg_package_file_lists,
lib.cfg_set_package_file_lists)
package_info = BoolProperty(lib.cfg_package_info,
lib.cfg_set_package_info)
def __eq__(self, other):
return self._ptr[0] == other._ptr[0]
def __ne__(self, other):
return self._ptr[0] != other._ptr[0]
class DB(object):
class PackageList(object):
def __init__(self, owner):
self.owner = owner
def __len__(self):
return lib.db_package_count(self.owner._ptr)
def get(self, off=0, count=None):
if isinstance(off, str):
if count is not None:
raise ValueError('named access cannot have a count')
return self.get_named(off)
if off < 0: raise IndexError
maxcount = len(self)
if off >= maxcount: raise IndexError
count = count or maxcount
if count < 0: raise ValueError('cannot fetch a negative count')
count = min(count, maxcount - off)
out = (p_pkg * count)()
got = lib.db_package_get(self.owner._ptr, out, off, count)
return [Package(x,True) for x in out[0:got]]
def get_named(self, name):
ptr = lib.db_package_find(self.owner._ptr, cstr(name))
if ptr is None:
raise KeyError('no such package: %s' % (name))
return Package(ptr,True)
def __getitem__(self, key):
if isinstance(key, slice):
return self.__getslice__(key.start, key.stop, key.step)
if isinstance(key, str):
return self.get_named(key)
return self.get(key, 1)[0]
def __getslice__(self, start=None, stop=None, step=None):
step = step or 1
start = start or 0
count = stop - start if stop else None
if step == 0: raise ValueError('step cannot be zero')
if count == 0: return []
if step > 0:
if count < 0: return []
return self.get(start, count)[::step]
else:
if count > 0: return []
return self.get(start+count, -count)[-count:0:step]
def __contains__(self, value):
return value in self.get()
class ElfList(object):
def __init__(self, owner):
self.owner = owner
def __len__(self):
return lib.db_object_count(self.owner._ptr)
def get_named(self, name):
return [i for i in self.get() if i.basename == name][0]
def get(self, off=0, count=None):
if isinstance(off, str):
if count is not None:
raise ValueError('named access cannot have a count')
return self.get_named(off)
if off < 0: raise IndexError
maxcount = len(self)
if off >= maxcount: raise IndexError
count = count or maxcount
if count < 0: raise ValueError('cannot fetch a negative count')
count = min(count, maxcount - off)
out = (p_pkg * count)()
got = lib.db_object_get(self.owner._ptr, out, off, count)
return [Elf(x) for x in out[0:got]]
def __getitem__(self, key):
if isinstance(key, slice):
return self.__getslice__(key.start, key.stop, key.step)
if isinstance(key, str):
return self.get_named(key)
return self.get(key, 1)[0]
def __getslice__(self, start=None, stop=None, step=None):
step = step or 1
start = start or 0
count = stop | - start if stop else None
if step == 0: raise ValueError('step cannot be zero')
if coun | t == 0: return []
if step > 0:
if count < 0: return []
return self.get(start, count)[::step]
else:
if count > 0: return []
return self.get(start+count, -count)[-count:0:step]
def __contains__(self, value):
return value in self.get()
def __init__(self, cfg):
self._ptr = lib.db_new(cfg._ptr)
if self._ptr is None:
raise PKGDepDBException('failed to create database instance')
self._library_path = StringListAccess(self,
lib.db_library_path_count,
lib.db_library_path_get,
lib.db_library_path_add,
lib.db_library_path_contains,
lib.db_library_path_del_s,
lib.db_library_path_del_i,
lib.db_library_path_del_r,
lib.db_library_path_set_i)
self._ignored_files = StringListAccess(self,
lib.db_ignored_files_count,
lib.db_ignored_files_get,
lib.db_ignored_files_add,
lib.db_ignored_files_contains,
lib.db_ignore |
poliastro/poliastro | contrib/CR3BP/test_run_CR3BP.py | Python | mit | 6,277 | 0 | """
********************************************************************
Test file for implementation check of CR3BP library.
********************************************************************
Last update: 21/01/2022
Description
-----------
Contains a few sample orbit propagations to test the CR3BP library.
The orbits currently found in test file include:
- L2 southern NRHO (9:2 NRHO of Lunar Gateway Station)
- Distant Retrograde Orbit (DRO)
- Butterfly Orbit
- L2 Vertical Orbit
"""
# Testing CR3BP implementation
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from CR3BP import getChar_CR3BP, propagate, propagateSTM
from poliastro.bodies import Earth, Moon
# Earth-Moon system properties
k1 = Earth.k.to(u.km**3 / u.s**2).value
k2 = Moon.k.to(u.km**3 / u.s**2).value
r12 = 384747.99198 # Earth-Moon distance
# Compute CR3BP characterisitic values
mu, kstr, lstr, tstr, vstr, nstr = getChar_CR3BP(k1, k2, r12)
# -- Lunar Gateway Station Orbit - 9:2 NRHO
"""
The orbit is a Near-Rectilinear Halo Orbit (NRHO) around the L2 Lagragian
point of the Earth-Moon system. The orbit presented here is a southern
sub-family of the L2-NRHO. This orbit is 9:2 resonant orbit currenly set
as the candidate orbit for the Lunar Gateway Station (LOP-G). Its called
9:2 resonant since a spacecraft | would complete 9 orbits in the NRHO for
every 2 lunar month (slightly different from lunar orbit period).
The exact orbital elements presented here are from the auther's simulations.
The orbit states were obtained starting form guess solutions given in various
references. A few are provide | d below:
Ref: White Paper: Gateway Destination Orbit Model: A Continuous 15 Year NRHO
Reference Trajectory - NASA, 2019
Ref: Strategies for Low-Thrust Transfer Design Based on Direct Collocation
Techniques - Park, Howell and Folta
The NRHO are subfamily of the Halo orbits. The 'Near-Rectilinear' term comes
from the very elongated state of the orbit considering a regular Halo. Halo
orbits occur in all three co-linear equilibrum points L1,L2 and L3. They occur
in a pair of variants (nothern and southern) due to symmetry of CR3BP.
"""
# 9:2 L2 souther NRHO orbit
r0 = np.array([[1.021881345465263, 0, -0.182000000000000]])
v0 = np.array([0, -0.102950816739606, 0])
tf = 1.509263667286943
# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)
# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)
# ploting orbit
rf = np.array(rf)
fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
(np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
) # aspect ratio is 1:1:1 in data space
# ploting the moon
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("L2 Southern NRHO")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")
ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "b")
plt.show()
"""
All other orbits in this section are computed from guess solutions available
in Grebow's Master and PhD thesis. He lists a quite detailed set of methods
to compute most of the major periodic orbits I have presented here. All of
them use differntial correction methods which are not yet implemented in this
library.
Ref: GENERATING PERIODIC ORBITS IN THE CIRCULAR RESTRICTED THREEBODY PROBLEM
WITH APPLICATIONS TO LUNAR SOUTH POLE COVERAGE
- D.Grebow 2006 (Master thesis)
Ref: TRAJECTORY DESIGN IN THE EARTH-MOON SYSTEM
AND LUNAR SOUTH POLE COVERAGE
- D.Grebow 2010 (PhD desertation)
"""
# -- DRO orbit
# DRO orbit states
r0 = np.array([0.783390492345344, 0, 0])
v0 = np.array([0, 0.548464515316651, 0])
tf = 3.63052604667440
# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)
# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)
# ploting orbit
rf = np.array(rf)
fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
(np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
) # aspect ratio is 1:1:1 in data space
# ploting the moon
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("Distant Restrograde orbit (DRO)")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")
ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "m")
plt.show()
# -- Butterfly orbit
# Butterfly orbit states
r0 = np.array([1.03599510774957, 0, 0.173944812752286])
v0 = np.array([0, -0.0798042160573269, 0])
tf = 2.78676904546834
# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)
# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)
# ploting orbit
rf = np.array(rf)
fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
(np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
) # aspect ratio is 1:1:1 in data space
# ploting the moon
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("Butterfly orbit")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")
ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "r")
plt.show()
# -- Vertical orbit
# Vertical orbit states
r0 = np.array([0.504689989562366, 0, 0.836429774762193])
v0 = np.array([0, 0.552722840538063, 0])
tf = 6.18448756121754
# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)
# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)
# ploting orbit
rf = np.array(rf)
fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
(np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
) # aspect ratio is 1:1:1 in data space
# ploting the moon
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("L2 Vertical orbit")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")
ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "g")
plt.show()
# -- Propage STM
# propagate base trajectory with state-transition-matrix
STM0 = np.eye(6)
rf, vf, STM = propagateSTM(mu, r0, v0, STM0, tofs, rtol=1e-11)
# STM is a matrix of partial derivatives which are used in Newton-Raphson
# methods for trajectory design
|
edmorley/django | tests/file_storage/tests.py | Python | bsd-3-clause | 38,661 | 0.001397 | import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import don't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
get_storage_class('storage.NonexistentStorage')
def test_get_nonexistent_storage_class(self):
"""
get_storage_class raises an error if the requested class don't exist.
"""
with self.assertRaises(ImportError):
get_storage | _class('django.core.files.storage.NonexistentStorage')
def test_get_nonexisten | t_storage_module(self):
"""
get_storage_class raises an error if the requested module don't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'django.core.files.nonexistent_storage'"):
get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')
class FileSystemStorageTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, ())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
def test_lazy_base_url_init(self):
"""
FileSystemStorage.__init__() shouldn't evaluate base_url.
"""
storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
with self.assertRaises(NoReverseMatch):
storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
Makes sure an exception is raised if the location is empty
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, os.getcwd())
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def _test_file_time_getter(self, getter):
# Check for correct behavior under both USE_TZ=True and USE_TZ=False.
# The tests are similar since they both set up a situation where the
# system time zone, Django's TIME_ZONE, and UTC are distinct.
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_on(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5. The following will be aware in UTC.
now = timezone.now()
self.assertFalse(self.storage.exists('test.file.tz.on'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.on', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be aware, in UTC
self.assertTrue(timezone.is_aware(dt))
self.assertEqual(now.tzname(), dt.tzname())
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and now should be the same effective time.
self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_off(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5.
self.assertFalse(self.storage.exists('test.file.tz.off'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.off', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be naive, in system (+1) TZ
self.assertTrue(timezone.is_naive(dt))
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and naive_now should be the same effective time.
self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
# If we convert dt to an aware object using the Algiers
# timezone then it should be the same effective time to
# now_in_alg |
jh0ker/scambot | bot.py | Python | lgpl-3.0 | 21,774 | 0.000643 | import logging
from datetime import datetime
from io import BytesIO, BufferedReader
from telegram.ext import Updater, CommandHandler, RegexHandler, \
MessageHandler, Filters, CallbackQueryHandler
from telegram.ext.dispatcher import run_async
from telegram import ParseMode, ReplyKeyboardMarkup, ReplyKeyboardHide, \
ChatAction, ForceReply, InlineKeyboardMarkup, InlineKeyboardButton, Emoji
from telegram.utils.botan import Botan
from pony.orm import db_session, select, desc
from credentials import TOKEN, BOTAN_TOKEN
from start_bot import start_bot
from database import db
from admin import Admin
from scammer import Scammer
from reporter import Reporter
# States the bot can have (maintained per chat id)
MAIN, ADD_SCAMMER, REMOVE_SCAMMER, ADD_ADMIN, REMOVE_ADMIN, PHONE_NR,\
ACCOUNT_NR, BANK_NAME, REMARK, SEARCH, ADD_INFO, EDIT, ATTACHMENT =\
range(13)
options = {PHONE_NR: "Phone number", ACCOUNT_NR: "Account number",
BANK_NAME: "Name of bank account owner", REMARK: "Admin remark",
ATTACHMENT: "Attachment"}
# Enable reverse lookup
for k, v in list(options.items()):
options[v] = k
_grid = [[options[ACCOUNT_NR]],
[options[BANK_NAME]],
[options[PHONE_NR]],
[options[REMARK]],
[options[ATTACHMENT]],
['/cancel']]
CAT_KEYBOARD = ReplyKeyboardMarkup(_grid, selective=True)
DB_NAME = 'bot.sqlite'
state = dict()
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
u = Updater(TOKEN)
dp = u.dispatcher
db.bind('sqlite', DB_NAME, create_db=True)
db.generate_mapping(create_tables=True)
with db_session:
if len(select(a for a in Admin if a.id is 10049375)) is 0:
# Create initial admin account
Admin(id=10049375, first_name="Jannes", super_admin=True)
if len(select(a for a in Admin if a.id is 46348706)) is 0:
# Create initial admin account
Admin(id=46348706, first_name="Jackson", super_admin=True)
# pass
botan = False
if BOTAN_TOKEN:
botan = Botan(BOTAN_TOKEN)
help_text = "This bot keeps a database of known scammers by recording their " \
"phone number, bank account number and name.\n\n" \
"<b>Usage:</b>\n" \
"/search - Search the database for reports\n\n" \
"Donations via BTC are welcome: 1EPu17mBM2zw4LcupURgwsAuFeKQrTa1jy"
admin_help_text = "\n\n" \
"<b>Admin commands:</b>\n" \
"/new - Add a new scammer report\n" \
"/edit - Edit an existing report\n" \
"/delete - Delete a scammer report\n" \
"/cancel - Cancel current operation"
super_admin_help_text = "\n\n" \
"<b>Super Admin commands:</b>\n" \
"/add_admin - Register a new admin\n" \
"/remove_admin - Remove an admin\n" \
"/download_database - Download complete database"
def error(bot, update, error):
""" Simple error handler """
logger.exception(error)
def help(bot, update):
""" Handler for the /help command """
from_user = update.message.from_user
chat_id = update.message.chat_id
with db_session:
admin = get_admin(from_user)
text = help_text
if admin:
text += admin_help_text
if admin.super_admin:
text += super_admin_help_text
bot.sendMessage(chat_id,
text=text,
parse_mode=ParseMode.HTML,
disable_web_pa | ge_preview=True)
def get_admin(from_user):
admin = Admin.get(id=from_user.id)
if admin:
admin.first_name = from_user.first_name
admin.last_name = from_user.last_name
| admin.username = from_user.username
return admin
def get_reporter(from_user):
reporter = Reporter.get(id=from_user.id)
if reporter:
reporter.first_name = from_user.first_name
reporter.last_name = from_user.last_name
reporter.username = from_user.username
return reporter
def message_handler(bot, update):
global state
forward_from = update.message.forward_from
chat_id = update.message.chat_id
chat_state = state.get(chat_id, MAIN)
reply = None
reply_markup = ReplyKeyboardHide(selective=True)
with db_session:
if chat_state is ADD_SCAMMER and forward_from:
reporter = get_reporter(forward_from)
if not reporter:
reporter = Reporter(id=forward_from.id,
first_name=forward_from.first_name,
last_name=forward_from.last_name,
username=forward_from.username)
track(update, 'new_reporter')
scammer = Scammer(added_by=get_admin(update.message.from_user))
scammer.reported_by.add(reporter)
track(update, 'new_report')
db.commit()
reply = "Created report <b>#%d</b>! Please enter scammer " \
"information:" % scammer.id
reply_markup = CAT_KEYBOARD
state[chat_id] = [ADD_INFO, scammer.id]
elif chat_state is EDIT:
try:
report_id = int(update.message.text.replace('#', ''))
except ValueError:
reply = "Not a valid report number. Try again or use " \
"/cancel to abort."
else:
scammer = Scammer.get(id=report_id)
if scammer:
reply = "%s\n\nPlease enter new " \
"scammer information:" % str(scammer)
reply_markup = CAT_KEYBOARD
state[chat_id] = [ADD_INFO, scammer.id]
else:
reply = "Could not find report number. Try again or " \
"use /cancel to abort."
elif chat_state is REMOVE_SCAMMER:
try:
report_id = int(update.message.text.replace('#', ''))
except ValueError:
reply = "Not a valid report number. Try again or use " \
"/cancel to abort."
else:
scammer = Scammer.get(id=report_id)
if scammer:
scammer.delete()
reply = "Deleted report!"
del state[chat_id]
else:
reply = "Could not find report number. Try again or " \
"use /cancel to abort."
reply_markup = ForceReply(selective=True)
elif chat_state is ADD_ADMIN and forward_from:
admin = get_admin(forward_from)
if not admin:
Admin(id=forward_from.id,
first_name=forward_from.first_name,
last_name=forward_from.last_name,
username=forward_from.username)
reply = "Successfully added admin"
else:
reply = "This user is already an admin"
del state[chat_id]
elif chat_state is REMOVE_ADMIN and forward_from:
admin = get_admin(forward_from)
if admin and not admin.super_admin:
admin.delete()
reply = "Successfully removed admin"
else:
reply = "This user is not an admin"
del state[chat_id]
elif (isinstance(chat_state, tuple) and
chat_state[0] is SEARCH and
update.message.text):
issued = chat_state[1]
if (datetime.now() - issued).seconds > 30:
reply = "Please send your /search query within 30 seconds."
del state[chat_id]
else:
text = update.message.text.replace('%', '')
scammers = select(
s for s in Scammer if
text in s.phone_nr or
text in s.account_nr or
text in s.bank_name or
text in s.remark
|
rossengeorgiev/aprs-python | aprslib/__init__.py | Python | gpl-2.0 | 1,577 | 0.002536 | # aprslib - Python library for working with APRS
# Copyright (C) 2013-2014 Rossen Georgiev
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Fo | undation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; | if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
APRS library in Python
Currently the library provides facilities to:
- parse APRS packets
- Connect and listen to an aprs-is packet feed
"""
# Py2 & Py3 compability
import sys
if sys.version_info[0] >= 3:
is_py3 = True
string_type = (str, )
string_type_parse = string_type + (bytes, )
int_type = int
else:
is_py3 = False
string_type = (str, unicode)
string_type_parse = string_type
int_type = (int, long)
from datetime import date as _date
__date__ = str(_date.today())
del _date
__version__ = "0.7.0"
version_info = (0, 7, 0)
__author__ = "Rossen Georgiev"
__all__ = ['IS', 'parse', 'passcode']
from aprslib.exceptions import *
from aprslib.parsing import parse
from aprslib.passcode import passcode
from aprslib.inet import IS
|
CodyKochmann/sync_lab | simple_notepad_server/cherrypy/wsgiserver/wsgiserver2.py | Python | mit | 90,364 | 0.000266 | """A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping | off and then
handling each connection in turn. Each connection can co | nsist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'CP_fileobject',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
import os
try:
import queue
except:
import Queue as queue
import re
import rfc822
import socket
import sys
if 'win' in sys.platform and hasattr(socket, "AF_INET6"):
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
try:
import cStringIO as StringIO
except ImportError:
import StringIO
DEFAULT_BUFFER_SIZE = -1
class FauxSocket(object):
"""Faux socket with the minimal interface required by pypy"""
def _reuse(self):
pass
_fileobject_uses_str_type = isinstance(
socket._fileobject(FauxSocket())._rbuf, basestring)
del FauxSocket # this class is not longer required for anything.
import threading
import time
import traceback
def format_exc(limit=None):
"""Like print_exc() but return a string. Backport for Python 2.3."""
try:
etype, value, tb = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
import operator
from urllib import unquote
import warnings
if sys.version_info >= (3, 0):
bytestr = bytes
unicodestr = str
basestring = (bytes, str)
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given
encoding.
"""
# In Python 3, the native string type is unicode
return n.encode(encoding)
else:
bytestr = str
unicodestr = unicode
basestring = basestring
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given
encoding.
"""
# In Python 2, the native string type is bytes. Assume it's already
# in the given encoding, which for ISO-8859-1 is almost always what
# was intended.
return n
LF = ntob('\n')
CRLF = ntob('\r\n')
TAB = ntob('\t')
SPACE = ntob(' ')
COLON = ntob(':')
SEMICOLON = ntob(';')
EMPTY = ntob('')
NUMBER_SIGN = ntob('#')
QUESTION_MARK = ntob('?')
ASTERISK = ntob('*')
FORWARD_SLASH = ntob('/')
quoted_slash = re.compile(ntob("(?i)%2F"))
import errno
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys())
# EINTR: a blocking socket call was interrupted by a signal
# (the WSA* spellings are the Windows Winsock equivalents).
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")

# Errors that simply mean the client went away; servers treat these as
# routine disconnects rather than failures.
socket_errors_to_ignore = plat_specific_errors(
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
)
# NOTE(review): these two entries are strings, not errno numbers --
# presumably matched against exception message text (socket timeouts);
# confirm against the call sites that consume this list.
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")

# Errors a non-blocking socket raises when no data is ready yet.
socket_errors_nonblocking = plat_specific_errors(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')

# HTTP headers whose repeated occurrences may legally be folded into a
# single header with comma-separated values (RFC 2616 section 4.2).
comma_separated_headers = [
    ntob(h) for h in
    ['Accept', 'Accept-Charset', 'Accept-Encoding',
     'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
     'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
     'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
     'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
     'WWW-Authenticate']
]

import logging
# Shared mount point where components publish runtime statistics dicts.
if not hasattr(logging, 'statistics'):
    logging.statistics = {}
def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in (SPACE, TAB):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = ", ".join((existing, v |
KlubbAlfaRomeoNorge/members | constants.py | Python | gpl-2.0 | 1,596 | 0 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# Portello membership system
# Copyright (C) 2014 Klubb Alfa Romeo Norge
#
# This program is | free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is dis | tributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# -------------------------------------------------------------------------
"""Various constants (that probably should be config options later on)"""
# Name of the ordinary member type
DEFAULT_MEMBER_NAME = 'Medlem'
# Name of the support membership type
DEFAULT_SUPPORT_MEMBER_NAME = 'Støttemedlem'
# Name of the status newly signed up members should be set to
SIGNUP_STATUS_NAME = 'Innmeldt'
# Name of status for members that have received welcome letter
WELCOME_LETTER_NAME = 'Velkomstpakke'
DEFAULT_MEMBER_STATUS_NAME = 'Medlem'
SERVER_URL = 'https://klubbalfaromeonorge.appspot.com'
PROFILE_URL = SERVER_URL + '/selfservice/profile'
DEFAULT_MODEL_NAME = 'Annen Alfa Romeo'
MEMBER_TYPE_EXPIRED = 'Utmeldt'
|
PyBossa/pybossa | test/test_jobs/test_schedule_jobs.py | Python | agpl-3.0 | 3,216 | 0.001555 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
from pybossa.jobs import schedule_job
from rq_scheduler import Scheduler
import settings_test
from redis.sentinel import Sentinel
def a_function():
    """No-op callable used as a schedulable job fixture."""
    return

def another_function():
    """Second no-op callable, distinct from a_function for dedupe tests."""
    return

# Job descriptors in the shape pybossa.jobs.schedule_job expects:
# callable, positional/keyword args, repeat interval (s), rq timeout (s).
a_job = dict(name=a_function, args=[], kwargs={},
             interval=1, timeout=180)
another_job = dict(name=another_function, args=[], kwargs={},
                   interval=1, timeout=180)
class TestSetupScheduledJobs(object):
    """Tests for setup function 'schedule_job'"""

    def setUp(self):
        # Connect to the Redis master through Sentinel and wipe the DB so
        # every test starts from an empty scheduler queue.
        sentinel = Sentinel(settings_test.REDIS_SENTINEL)
        db = getattr(settings_test, 'REDIS_DB', 0)
        self.connection = sentinel.master_for('mymaster', db=db)
        self.connection.flushall()
        self.scheduler = Scheduler('test_queue', connection=self.connection)

    def test_adds_scheduled_job_with_interval(self):
        # The requested interval must be stored in the job's metadata.
        a_job['interval'] = 7
        schedule_job(a_job, self.scheduler)
        sched_jobs = self.scheduler.get_jobs()

        assert len(sched_jobs) == 1, sched_jobs
        assert sched_jobs[0].meta['interval'] == 7 , sched_jobs[0].meta
        # Restore the shared module-level fixture for later tests.
        a_job['interval'] = 1

    def test_adds_several_jobs_(self):
        # Two distinct callables can be scheduled side by side.
        schedule_job(a_job, self.scheduler)
        schedule_job(another_job, self.scheduler)
        sched_jobs = self.scheduler.get_jobs()
        job_func_names = [job.func_name for job in sched_jobs]
        module_name = 'test_jobs.test_schedule_jobs'

        assert len(sched_jobs) == 2, sched_jobs
        assert module_name + '.a_function' in job_func_names, job_func_names
        assert module_name + '.another_function' in job_func_names, job_func_names

    def test_does_not_add_job_if_already_added(self):
        # Scheduling the same job twice must be a no-op the second time.
        schedule_job(a_job, self.scheduler)
        schedule_job(a_job, self.scheduler)
        sched_jobs = self.scheduler.get_jobs()

        assert len(sched_jobs) == 1, sched_jobs

    def test_returns_log_messages(self):
        # schedule_job reports success or duplicate-rejection as a string.
        success_message = schedule_job(a_job, self.scheduler)
        failure_message = schedule_job(a_job, self.scheduler)

        assert success_message == 'Scheduled a_function([], {}) to run every 1 seconds'
        assert failure_message == 'WARNING: Job a_function([], {}) is already scheduled'

    def test_failed_attempt_to_schedule_does_not_polute_redis(self):
        # A rejected duplicate must not leave an extra rq:job* key behind.
        schedule_job(a_job, self.scheduler)
        schedule_job(a_job, self.scheduler)
        stored_values = self.connection.keys('rq:job*')

        assert len(stored_values) == 1, len(stored_values)
chbrandt/zyxw | eada/io/fits.py | Python | gpl-2.0 | 9,491 | 0.018544 | """ Module to deal with FITS catalog read/access """
##@file fits_data
import sys;
import pyfits;
import string;
import numpy as np;
import re;
# ---
def sort_by_column(tbhdu, fieldname):
    """
    Sort a FITS table HDU by "fieldname" column in increasing order.

    Input:
     - tbhdu: FITS table HDU
     - fieldname <str> : field name of the column to sort

    Output:
     - new tbhdu with data sorted according to 'fieldname' column
    """
    from operator import itemgetter
    defs = tbhdu.columns
    rows = tbhdu.data
    key_index = rows.names.index(fieldname)
    # Sort whole records by the chosen field, then flip to per-column
    # arrays so each pyfits.Column can be rebuilt from its own data.
    columns_data = np.transpose(sorted(rows, key=itemgetter(key_index)))
    rebuilt = [pyfits.Column(name=defs[i].name, format=defs[i].format,
                             array=columns_data[i])
               for i in range(len(defs.names))]
    return pyfits.new_table(pyfits.ColDefs(rebuilt))
# ---
def merge_tbHDU(tbhdu_A, tbhdu_B):
    """
    Merge two tables (HDU) columns

    A new table (HDU) containing table 'A' and 'B'
    columns is output. It is expected that both tables
    lines are the same (same order). If one of them
    has fewer lines, 0/NULL value is given to complete
    the lines/columns.

    Input:
     - tbhdu_A : pyfits.BinTableHDU
     - tbhdu_B : pyfits.BinTableHDU

    Output:
     - tbhdu : pyfits.BinTableHDU
       Result from input tables merge
    ---
    """
    # ColDefs addition concatenates the two column sets.
    combined_columns = tbhdu_A.columns + tbhdu_B.columns
    return pyfits.new_table(combined_columns)
# ---
def extend_tbHDU(tbhdu_A, tbhdu_B):
    """
    Extend first tbHDU with second entries

    The output is a table (HDU) with column 'B' lines
    extending 'A' entries. Column names on both input
    table need to be the same.

    Input:
     - tbhdu_A : FITS binary table HDU
     - tbhdu_B : FITS binary table HDU

    Output:
     - tbhdu : FITS binary table HDU
       Result from extention, A+B
    ---
    """
    # NAXIS2 holds the row count of a FITS binary table.
    Nrows_A = tbhdu_A.header['NAXIS2'];
    Nrows_B = tbhdu_B.header['NAXIS2'];
    Nrows = Nrows_A + Nrows_B;
    # new_table pre-allocates Nrows rows and copies A's data into the top.
    new_tb = pyfits.new_table(tbhdu_A.columns,nrows=Nrows);
    # Overwrite the freshly allocated tail rows with B's column data.
    # NOTE(review): assumes every column of A also exists in B -- a
    # missing name would raise here; confirm with callers.
    for name in tbhdu_A.columns.names:
        new_tb.data.field(name)[Nrows_A:] = tbhdu_B.data.field(name);
    return new_tb;
# ---
def select_columns(tbhdu, *fieldnames):
    """
    Select particular columns from given table

    A new table with only the asked columns ('fieldnames')
    is output.

    Input:
     - tbhdu : pyfits.open('data.fit')[?]
       Table HDU, often "?" equals 1
     - fieldnames : str,
       Names of the columns to keep (matched upper-cased)

    Output:
     -> (new) BinTableHDU, with just the selected fields
    ---
    """
    defs = tbhdu.columns
    rows = tbhdu.data
    # Field names are stored upper-cased in the table metadata.
    picked = [rows.names.index(fname.upper()) for fname in fieldnames]
    kept = [pyfits.Column(name=defs[i].name, format=defs[i].format,
                          array=rows.field(i))
            for i in picked]
    return pyfits.new_table(pyfits.ColDefs(kept))
# ---
def select_rows(tbhdu, indices):
    """
    Read rows(indexes) from given HDU (catalog)

    A new table with only the asked table indexes,
    'indices', is output.

    Input:
     - tbhdu [BinTableHDU] : FITS table HDU
     - indices [int,] : List of indexes to read from tbhdu

    Output:
     -> (new) BinTableHDU : sub-selection (rows) of tbhdu
    ---
    """
    selected = tbhdu.data.take(indices)
    return pyfits.BinTableHDU(selected)
# ---
def select_entries(tbhdu, fieldname, values):
    """
    Read entries (lines) from given HDU (catalog)

    'values' matching entries in column 'fieldname'
    is used to select rows from given 'tbhdu'. A new
    table (HDU) is generated from selected lines.

    Input:
     - tbhdu [BinTableHDU] : Table HDU, often "?" equals to 1
     - fieldname [str] : Field (column) name that 'values' should match
     - values [fieldname type] : List of values to match in 'field' entries

    Output:
     -> (new) BinTableHDU : sub-selection (rows) of tbhdu
    ---
    """
    _inds = [];
    # 'values' may be an iterable of values or a single scalar; when the
    # for-loop fails the except branch retries with it as one scalar.
    # NOTE(review): the bare 'except' also swallows real errors (e.g. a
    # bad fieldname) before re-raising from the retry -- consider
    # narrowing it to TypeError.
    try:
        for i in values:
            _inds.extend( np.where(tbhdu.data.field(fieldname)==i)[0].tolist() );
    except:
        _inds.extend( np.where(tbhdu.data.field(fieldname)==values)[0].tolist() );
    return select_rows(tbhdu,_inds);
# ---
def sample_entries(tbhdu, **kwargs):
    """
    Retrieve a slice of given table HDU

    'kwargs' maps fieldnames (columns of 'tbhdu') to filter thresholds.
    Each value is either a single number - used as a minimum threshold -
    or a (min,max) pair for an inclusive interval. A row is kept only if
    it satisfies *all* of the given filters.

    Input:
     - tbhdu : pyfits.BinTableHDU
       FITS table HDU
     - kwargs : {'fieldname': value_or_minmax_pair, ...}

    Output:
     - tbhdu : pyfits.BinTableHDU
       Binary table HDU with selected lines
    ---
    """
    # None distinguishes "no filter applied yet" from "filters matched
    # nothing". The previous code restarted from scratch whenever the
    # running selection was empty, silently dropping earlier filters.
    indices = None
    for key, bound in kwargs.items():
        try:
            lo, hi = bound
            at_least = np.where(tbhdu.data.field(key) >= lo)[0].tolist()
            at_most = np.where(tbhdu.data.field(key) <= hi)[0].tolist()
            inds = list(set(at_least) & set(at_most))
        except (TypeError, ValueError):
            # 'bound' is a scalar: treat it as a minimum threshold only.
            inds = np.where(tbhdu.data.field(key) >= bound)[0].tolist()
        if indices is None:
            indices = inds
        else:
            # Rows must pass every filter: intersect with previous result.
            indices = list(set(indices) & set(inds))
    if indices is None:
        # No filters given: keep nothing (matches historical behaviour).
        indices = []
    return select_rows(tbhdu, indices)
# ---
def dict_to_tbHDU(dic, tbname='', *fieldnames):
""" Return a table HDU with 'dic' keys|lists
This function is designed to handle lists of number/string
values (not arrays). The 'keys' used to label 'dic' values
(list) are used to label the output data table and each
column format (data type) will be estimated from data.
It can be used one-dimensional numpy arrays in 'dic' entries.
Valid data-types: 'int', 'int32', 'int64', 'float', 'float32',
'float64', 'complex', 'long/object' and string(character)
Maximum size of string values is 10 characters.
If 'fieldnames' are given, only them (from 'dic') are read
and written to output (new) table.
Input:
- dic : {'key':[],}
Dictionary with a collection of key:column data
- tbname : str
Output tbale header name
- fieldnames : str,
[optional] keys to read from 'dic'
Output:
- tbhdu : pyfits.BinTableHDU
A HDU associated to a table with 'dic' contents
---
"""
args = fieldnames;
if not len(args):
args = dic.keys();
# Function verify datatypes for convertion
#
def is_numeric(lit):
try:
return complex(lit);
except:
pass;
try:
return float(lit);
except ValueError:
pass;
try:
return int(lit);
except ValueError:
pass;
return lit;
c = [];
to_header = {};
for _arg in args:
if type(dic[_arg]) == type(str()):
to_header[_arg] = dic[_arg];
continue;
try:
valores = dic[_arg]
ti | po = valores.dtype
ndim = valores.ndim
except AttributeError:
valores = [ is_numeric(s_i) for s_i in dic[_arg] ];
tipo = np.asarray(valores).dtype
ndim = np.asarray(valores).ndim
try:
if tipo == np.dtype('int64'):
tipo = str(ndim)+'K'
elif tipo == np.dtype('int32') or tipo == np.dtype('int'):
tipo = st | r(ndim)+'J'
elif tipo == np.dtype('float64') or tipo == np.dtype('float'):
tipo = str(ndim)+'D'
elif tipo == np.dtype('float32'):
tipo = str(ndim)+'E'
elif tipo == np.dtype('complex'):
tipo = |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/shapely/geometry/proxy.py | Python | agpl-3.0 | 1,342 | 0.000745 | """Proxy for coordinates stored outside Shapely geometries
"""
from shapely.geos import lgeos
from shapely import wkb
EMPTY = wkb.deserialize('010700000000000000'.decode('hex'))
class CachingGeometryProxy(object):
    """Proxy that lazily builds and caches a GEOS geometry from external
    coordinate data.

    The cached pointer in ``__geom__`` is kept in sync with ``context``
    by fingerprinting the context (``gtag``); when the fingerprint
    changes the old geometry is destroyed and rebuilt via ``factory``.
    """

    # External coordinate data the geometry is built from.
    context = None
    # Callable returning (geom_ptr, ndim) for the context; assigned by
    # users/subclasses. (Was corrupted to "No | ne" in the source.)
    factory = None
    # Cached GEOS geometry pointer; EMPTY marks "not built yet".
    __geom__ = EMPTY
    # Fingerprint of the context used to build __geom__ (staleness check).
    _gtag = None

    def __init__(self, context):
        self.context = context

    @property
    def _is_empty(self):
        # EMPTY and None both mean "no cached geometry to free".
        return self.__geom__ in [EMPTY, None]

    def empty(self):
        """Destroy the cached GEOS geometry, if any, and reset the cache."""
        if not self._is_empty:
            lgeos.GEOSGeom_destroy(self.__geom__)
        self.__geom__ = EMPTY

    @property
    def _geom(self):
        """Keeps the GEOS geometry in synch with the context."""
        gtag = self.gtag()
        if gtag != self._gtag or self._is_empty:
            # Stale or absent: rebuild from the current context.
            self.empty()
            self.__geom__, n = self.factory(self.context)
            self._gtag = gtag
        return self.__geom__

    def gtag(self):
        # Cheap fingerprint of the context; a changed repr invalidates
        # the cached geometry.
        return hash(repr(self.context))
class PolygonProxy(CachingGeometryProxy):
    # Specialization for polygons, whose context has two parts that the
    # factory takes as separate arguments.

    @property
    def _geom(self):
        """Keeps the GEOS geometry in synch with the context."""
        gtag = self.gtag()
        if gtag != self._gtag or self._is_empty:
            self.empty()
            # context[0]/context[1] -- presumably exterior shell and
            # interior rings; confirm against the factory's signature.
            self.__geom__, n = self.factory(self.context[0], self.context[1])
            self._gtag = gtag
        return self.__geom__
|
ofermend/medicare-demo | socialite/jython/Lib/pickle.py | Python | apache-2.0 | 44,792 | 0.002366 | """Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision$" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
import re
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
    """Root of the pickle exception hierarchy; the pickling and
    unpickling errors below both derive from this class."""
    pass
class PicklingError(PickleError):
    """This exception is raised when an unpicklable object is passed to the
    dump() method.
    """
    # No extra state; the class exists so callers can distinguish
    # pickling failures from unpickling ones.
    pass
class UnpicklingError(PickleError):
    """This exception is raised when there is a problem unpickling an object,
    such as a security violation.

    Note that other exceptions may also be raised during unpickling, including
    (but not necessarily limited to) AttributeError, EOFError, ImportError,
    and IndexError.
    """
    # Marker subclass only; carries no additional state.
    pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
del x
# Pickling machinery
class Pickler:
def __init__(self, file, protocol=None):
"""This takes a file-like object for writing a pickle data stream.
The optional protocol argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2. The default
protocol is 0, to be backwards compatible. (Protocol 0 is the
only protocol that can be written to a file opened in text
mode and read back successfully. When using a protocol higher
than 0, make sure the file is opened in binary mode, both when
pickling and unpickling.)
Protocol 1 is more efficient than protocol 0; protocol 2 is
more efficient than protocol 1.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The file parameter must have a write() method that accepts a single
string argument. It can thus be an open file object, a StringIO
object, or any other custom object that meets this interface.
"""
if protocol | is None:
protocol = 0
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
self.write = file.write
self.memo = {}
self.proto = int(protoc | ol)
self.bin = protocol >= 1
self.fast = 0
def clear_memo(self):
" |
globus/globus-release-tools | share/python/repo/__init__.py | Python | apache-2.0 | 17,563 | 0 | # Copyright 2014-2015 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Repository Management
"""
import datetime
import fnmatch
import hashlib
import os
import os.path
import re
default_root = "/mcs/globus.org/ftppub/gt6"
default_api_root = "/mcs/globus.org/api"
default_releases = ["unstable", "testing", "stable"]
public_key = """-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.5 (GNU/Linux)
mQGiBE0PXQkRBAC12PfwFzMyTKAvCp3AEbzdwwDyEaBHYmd1+Dv+q5c48fEZQrzA
PuZ75BnG8BRIo3ZSYJll9Xf5v8A0M6F35msBBdjUpI+PHZvSQ+yru6U3w9XCsmO9
jSGWM1XAw/hcDWOsETOsjJ56AqIKndOXtG2jeOMFD0MwJus9paDcv5pPkwCgk3Fk
I+GdLaZf0O6vGUtq2Fo2EgkD/14AQ4SyUufwztQeLwlYXyihdUoIVBl4wm4fndJb
TuzTlp3V/oabM8t+V92ftbqlAesFb1FdFQ9NeUEY0VIODR2OTsmEfLUSMK/kRfXM
4FatObXpEp58EydZb3oz/vwASEk1Nno5OW2noGZL3sCk+3j65MstI2q4kMvNSvl+
JEjUBACgNv/mDrn0UjWBuzxOuZrh1r2rBdsjIHx31o/vBF5YLfQhErZQTm6cfpRK
W32Nm18btrqgxxHFAMb4wxnVxAxdM3zLSAaiqvi33z2wHReh5TfaVKpJBj7LpMSI
hwu50iovsBjE7HiusJQBWBtk8Bqp4g9ic2sPV0caEMCUXU5R9bQjR2xvYnVzIFRv
b2xraXQgPHN1cHBvcnRAZ2xvYnVzLm9yZz6IYAQTEQIAIAUCTQ9dCQIbAwYLCQgH
AwIEFQIIAwQWAgMBAh4BAheAAAoJEESufsL68kNlb6IAoIemS8dr65xCkA4GQzgJ
ngXwZgtvAKCOKs5Ork6HiNKIrWRGMLvA7iktBbkCDQRND10SEAgA37cRQGj/QNcc
OjyBrL6e2wPT7UtpXBEHzfjhtmT8+VC+PSbKRxVfawLBtrfzSAAwsmye3c+XK/VB
Pa06vSSmezeyNau+XtEVLwrwQwO/kM6wgNtb7zYyI67Y6XEPP+ZlqpZ0W14cTZBD
3SXWuu6zqjdtUnJCg/j/j0zH5TZa40aCfisERxNCQeoePk2gmMTJDJF0ASM3Nhys
QIP9qpCA+eOJnKmMeEgDCW9j2mYO4tp9lCSbi15HAb41HKN6xypNWk+EHKyu9n50
88UocRHXLZFujzNTGIokWAcoC0D3qpVQehtAVgt1VPrE6MxFPek8ZN4Ho++92KB7
F6E0OsfF6wADBggAnNPguzYAIztF/EzZANUU/7Eon9zJaD4Lf/mnhB3bMuGvenY0
7HSBAXbUxVXs7uX3S6u9PZ9dytl2Fqh8w47TNcC0ACKLRnhxTJ92LLakzAGVGtNz
2W9l+YJaZ6qIQR9FmYpCyIWp6Vm47yOARThrMtnwUhb53g5ZfxgzpHNUDN/7utTy
3sUaMRiijecmSVhDFbrz7ryY2Btlcr7ZrBo0ODHohDkZVn2UrzE6qg9g5np03zYe
5OUM5Lt5GYZJSKZO81aJ5+9DlkiAev3BFEeCsSOwjrqLZpsr0olbIfeHCi8pvjOJ
SCfx4Qs/hI34ykaUn3AgbgxqT0mSKfMasg2bIIhJBBgRAgAJBQJND10SAhsMAAoJ
EESufsL68kNlBuAAnRRI5jFAvyjtQaoQpVqSL4/O45D7AJ9WrW/vxTzN0OyZyUU6
8T0dJyXArA==
=r6rU
-----END PGP PUBLIC KEY BLOCK-----
"""
uid = os.getuid()
gid = None
def _digest_file(filename, force=False):
    """
    Compute the md5, sha1, sha512 hashes of a file and write them to disk.

    For each algorithm a sidecar file "<filename>.<alg>" is written in
    the "<hexdigest> <basename>" format used by md5sum-style tools.

    Parameters
    ----------
    *filename*::
        Name of the file to compute the hash of (str)
    *force*::
        Overwrite existing hash file (bool [False])
    """
    # Never hash a hash sidecar file itself.
    if fnmatch.fnmatch(filename, "*.md5") or \
       fnmatch.fnmatch(filename, "*.sha1") or \
       fnmatch.fnmatch(filename, "*.sha512"):
        return
    for h in ['md5', 'sha1', 'sha512']:
        hashname = filename + "." + h
        # Recompute when forced, missing, or stale w.r.t. the data file.
        if (force
                or not os.path.exists(hashname)
                or os.path.getmtime(filename) > os.path.getmtime(hashname)):
            digester = hashlib.new(h)
            # NOTE(review): Python 2 only (file() builtin); also reads
            # the whole file into memory at once.
            f = file(filename, "r")
            digester.update(f.read())
            f.close()
            f = file(hashname, "w")
            f.write(
                "%s %s\n" %
                (digester.hexdigest(), filename.split(os.sep)[-1]))
            f.close()
class Repository(object):
"""
Repository class
===================
This class contains the generic package management features for the various
metadata types associated with different repository systems. It contains
algorithms for matching packages and selecting ones to copy into another
repository based on version matches. This is subclassed to implement the
actual metdata parsing for various metadata formats.
"""
def __init__(self):
    # Map of package-name -> list of package metadata objects,
    # populated by the format-specific subclasses.
    self.packages = {}
def get_packages(
        self, name=None, arch=None, version=None, source=None,
        newest_only=False):
    """
    Construct a list of packages that match the optional parameters.

    If *source* is a Metadata object, match packages that have that
    package as their source package (same source name and version).
    Otherwise filter on *name* if not None, narrowing further by
    *version* and *arch* when given. If *newest_only* is True, only
    the highest versions of the matching packages are returned.
    """
    package_candidates = []
    if source is not None:
        # All binary packages built from this exact source name+version.
        # (Restored "is not None" -- the line was corrupted in the source.)
        return [
            (package)
            for package_list in self.packages
            for package in self.packages[package_list]
            if package.source_name == source.source_name
            and package.version == source.version
        ]
    elif name is not None:
        if version is not None:
            package_candidates = [
                (package)
                for package_list in self.packages
                for package in self.packages[package_list]
                if name == package.name
                and package.version == version
            ]
        else:
            package_candidates = [
                (package)
                for package_list in self.packages
                for package in self.packages[package_list]
                if name == package.name
            ]
        if arch is not None:
            package_candidates = [
                (p)
                for p in package_candidates if p.arch == arch
            ]
        if newest_only and len(package_candidates) > 0:
            # NOTE(review): assumes the per-name lists are kept sorted by
            # version, so the last candidate is the newest -- confirm in
            # the metadata-parsing subclasses.
            newv = package_candidates[-1].version
            return [p for p in package_candidates if p.version == newv]
        elif newest_only:
            return []
        else:
            return package_candidates
    else:
        # No name given: recurse once per package name and merge results.
        package_candidates = []
        for n in self.packages:
            package_candidates.extend(
                self.get_packages(
                    name=n, arch=arch, newest_only=newest_only))
        return package_candidates
def is_newer(self, pkg):
    """
    Check to see if *pkg* is newer than any versions of the same package
    name within this repository. Returns 'True' if it is, 'False'
    otherwise.

    Parameters
    ----------
    *self*:
        This Repository object
    *pkg*:
        Package metadata to compare against the versions in *self*.

    Returns
    -------
    Boolean
    """
    # newest_only leaves at most the highest version(s) of the package.
    matches = self.get_packages(pkg.name, arch=pkg.arch, newest_only=True)
    # NOTE(review): relies on the package metadata type defining rich
    # comparison (__gt__) by version -- confirm in the Metadata classes.
    return matches == [] or pkg > matches[-1]
def __contains__(self, pkg):
    """
    Check to see if pkg is included in this Repository
    """
    # Membership means an exact name/arch/version match exists here.
    matches = self.get_packages(name=pkg.name, arch=pkg.arch,
                                version=pkg.version, newest_only=True)
    return len(matches) > 0
def __iter__(self):
    """
    Iterate over the package names in this repository.
    """
    # dict.keys() is not an iterator (a list on Python 2, a view on
    # Python 3), so returning it directly violates the iterator
    # protocol required of __iter__ and breaks e.g. next(iter_result)
    # and generator-style consumers. Wrap the dict in iter().
    return iter(self.packages)
@staticmethod
def create_index(path, recursive=False):
for root, dirs, filenames in os.walk(path, topdown=not recursive):
if not recursive:
del dirs[0:]
indexfile = os.path.join(root, "index.html")
index_mtime = 0
regenerate_index = False
if os.path.exists(indexfile):
index_mtime = os.stat(indexfile).st_mtime
else:
regenerate_index = True
if not regenerate_index:
for dir in dirs:
fulldir = os.path.join(root, dir)
if os.stat(fulldir).st_mtime >= index_mtime:
regenerate_index = True
break
if not regenerate_index:
for filename in fil |
dhylands/upy-examples | uctypes_test.py | Python | mit | 763 | 0.011796 | import uctypes
ACCEL_CONFIG = {
'x_self_test' : uctypes.BFUINT8 | 0 | 7 << uctypes.BF_POS | 1 << uctypes.BF_LEN,
'y_self_test' : uctypes.BFUINT8 | 0 | 6 << uctypes.BF_POS | 1 << uctypes.BF_LEN,
'z_self_test' : uctypes.BFUINT8 | 0 | 5 << uctypes.BF_POS | 1 << uctypes.BF_LEN,
'range' : uctypes.BFUINT8 | 0 | 3 << uctypes.BF_POS | 2 << uctypes.BF | _LEN | ,
}
buf = bytearray(1)
buf[0] = 0xa8
print('buf[0] =', hex(buf[0]))
accel_config = uctypes.struct(ACCEL_CONFIG, uctypes.addressof(buf))
print('x_self_test =', accel_config.x_self_test)
print('y_self_test =', accel_config.y_self_test)
print('z_self_test =', accel_config.z_self_test)
print('range =', accel_config.range)
accel_config.y_self_test = 1
print('buf[0] =', hex(buf[0]))
|
wxue/xiakelite | libs/flask/module.py | Python | mit | 8,422 | 0.000712 | # -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .helpers import _PackageBoundObject, _endpoint_from_view_func
def _register_module(module, static_path):
    """Internal helper function that returns a function for recording
    that registers the `send_static_file` function for the module on
    the application if necessary. It also registers the module on
    the application.
    """
    def _register(state):
        state.app.modules[module.name] = module
        # do not register the rule if the static folder of the
        # module is the same as the one from the application.
        if state.app.root_path == module.root_path:
            return
        # Fall back to the application's static path unless the module
        # requested its own via static_path.
        path = static_path
        if path is None:
            path = state.app.static_path
        # A module mounted under a URL prefix serves its static files
        # under that prefix too.
        if state.url_prefix:
            path = state.url_prefix + path
        state.app.add_url_rule(path + '/<path:filename>',
                               endpoint='%s.static' % module.name,
                               view_func=module.send_static_file,
                               subdomain=module.subdomain)
    return _register
class _ModuleSetupState(object):
def __init__(self, app, url_prefix=None, subdomain=None):
self.app = app
self.url_prefix = url_prefix
self.subdomain = subdomain
class Module(_PackageBoundObject):
"""Container object that enables pluggable applications. A module can
be used to organize larger applications. They represent blueprints that,
in combination with a :class:`Flask` object are used to create a large
application.
A module is like an application bound to an `import_name`. Multiple
modules can share the same import names, but in that case a `name` has
to be provided to keep them apart. If different import names are used,
the rightmost part of the import name is used as name.
Here an example structure for a larger appliation::
/myapplication
/__init__.py
/views
/__init__.py
/admin.py
/frontend.py
The `myapplication/__init__.py` can look like this::
from flask import Flask
from myapplication.views.admin import admin
from myapplication.views.frontend import frontend
app = Flask(__name__)
app.register_module(admin, url_prefix='/admin')
app.register_module(frontend)
And here an example view module (`myapplication/views/admin.py`)::
from flask import Module
admin = Module(__name__)
@admin.route('/')
def index():
pass
@admin.route('/login')
def login():
pass
For a gentle introduction into modules, checkout the
:ref:`working-with-modules` section.
.. versionadded:: 0.5
The `static_path` parameter was added and it's now possible for
modules to refer to their own templates and static files. See
:ref:`modules-and-resources` for more information.
.. versionadded:: 0.6
The `subdomain` parameter was added.
:param import_name: the name of the Python package or module
implementing this :class:`Module`.
:param name: the internal short name for the module. Unless specified
the rightmost part of the import name
:param url_prefix: an optional string that is used to prefix all the
URL rules of this module. This can also be specified
when registering the module with the application.
:param subdomain: used to set the subdomain setting for URL rules that
do not have a subdomain setting set.
:param static_path: can be used to specify a different path for the
static files on the web. Defaults to ``/static``.
This does not affect the folder the files are served
*from*.
"""
def __init__(self, import_name, name=None, url_prefix=None,
             static_path=None, subdomain=None):
    # Derive the short name from the rightmost import-path component
    # when the caller did not supply one explicitly.
    if name is None:
        assert '.' in import_name, 'name required if package name ' \
            'does not point to a submodule'
        name = import_name.rsplit('.', 1)[1]
    _PackageBoundObject.__init__(self, import_name)
    self.name = name
    self.url_prefix = url_prefix
    self.subdomain = subdomain
    # Deferred registration callbacks, run when the module is attached
    # to an application; seeded with the static-route recorder.
    self._register_events = [_register_module(self, static_path)]
def route(self, rule, **options):
"""Like :meth:`Flask.route` but for a module. The endpoint for the
:func:`url_for` function is prefixed with the name of the module.
"""
def decorator(f):
self.add_url_rule(rule, f.__name__, f, **options)
return f
return decorator
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """Like :meth:`Flask.add_url_rule` but for a module.  The endpoint for
        the :func:`url_for` function is prefixed with the name of the module.
        .. versionchanged:: 0.6
           The `endpoint` argument is now optional and will default to the
           function name to consistent with the function of the same name
           on the application object.
        """
        # Registration is deferred: this closure runs with the module's
        # registration state once the module is attached to an application.
        def register_rule(state):
            the_rule = rule
            if state.url_prefix:
                # Prepend the prefix chosen at registration time.
                the_rule = state.url_prefix + rule
            # Only apply the module-level subdomain if the rule has none.
            options.setdefault('subdomain', state.subdomain)
            the_endpoint = endpoint
            if the_endpoint is None:
                # Default the endpoint to the view function's name.
                the_endpoint = _endpoint_from_view_func(view_func)
            # Namespace the endpoint with the module name ("module.endpoint").
            state.app.add_url_rule(the_rule, '%s.%s' % (self.name,
                                                        the_endpoint),
                                   view_func, **options)
        self._record(register_rule)
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a module. This function
is only executed before each request that is handled by a function of
that module.
"""
self._record(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like :meth:`Flask.before_request`. Such a function is executed
before each request, even if outside of a module.
"""
self._record(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a module. This function
is only executed after each request that is handled by a function of
that module.
"""
self._record(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a module. Such a function
is executed after each request, even if outside of the module.
"""
self._record(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a module. This
function is only executed for requests handled by a module.
"""
self._record(lambda s: s.app.template_context_processors
.setdefault(self.name, []).append(f))
return f
def app_context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a module. Such a
function is executed each request, even if outside of the module.
"""
self._record(lambda s: s.app.template_context_processors
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a module. This
handler is used for all requests, even if outside of the module.
. |
jandom/rdkit | rdkit/Chem/Draw/__init__.py | Python | bsd-3-clause | 14,830 | 0.014295 | #
# Copyright (C) 2006-2016 Greg Landrum
# All Rights Reserved
#
import os, re
from rdkit.six import iteritems
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
from rdkit.Chem.Draw.rdMolDraw2D import *
def _getCanvas():
  """Select the best available drawing backend.

  Preference order is cairo, then agg, then sping, unless the
  RDKIT_CANVAS environment variable forces a specific backend.
  Returns the tuple (useAGG, useCairo, Canvas).
  """
  useAGG = False
  useCairo = False
  useSping = False
  Canvas = None
  if not os.environ.get('RDKIT_CANVAS', ''):
    # No explicit choice: probe the backends from best to most portable.
    try:
      from rdkit.Chem.Draw.cairoCanvas import Canvas
      useCairo = True
    except ImportError:
      try:
        from rdkit.Chem.Draw.aggCanvas import Canvas
        useAGG = True
      except ImportError:
        from rdkit.Chem.Draw.spingCanvas import Canvas
        useSping = True
  else:
    # Honor the backend requested through the environment variable.
    canv = os.environ['RDKIT_CANVAS'].lower()
    if canv == 'cairo':
      from rdkit.Chem.Draw.cairoCanvas import Canvas
      useCairo = True
    elif canv == 'agg':
      from rdkit.Chem.Draw.aggCanvas import Canvas
      useAGG = True
    else:
      from rdkit.Chem.Draw.spingCanvas import Canvas
      useSping = True
  if useSping:
    DrawingOptions.radicalSymbol = '.' #<- the sping canvas doesn't support unicode well
  return useAGG, useCairo, Canvas
def _createCanvas(size):
  """Create an (image, canvas) pair of the given pixel size using the
  best available backend (see _getCanvas)."""
  useAGG, useCairo, Canvas = _getCanvas()
  if useAGG or useCairo:
    # PIL-backed backends draw onto a transparent RGBA image.
    try:
      import Image
    except ImportError:
      from PIL import Image
    img = Image.new("RGBA", size, (0, 0, 0, 0))
    canvas = Canvas(img)
  else:
    # The sping fallback owns its own image object.
    from rdkit.Chem.Draw.spingCanvas import Canvas
    canvas = Canvas(size=size, name='MolToImageFile')
    img = canvas._image
  return img, canvas
def MolToImage(mol, size=(300, 300), kekulize=True, wedgeBonds=True, fitImage=False, options=None,
               canvas=None, **kwargs):
  """Returns a PIL image containing a drawing of the molecule

    ARGUMENTS:

      - kekulize: run kekulization routine on input `mol` (default True)
      - size: final image size, in pixel (default (300,300))
      - wedgeBonds: draw wedge (stereo) bonds (default True)
      - highlightAtoms: list of atoms to highlight (default [])
      - highlightMap: dictionary of (atom, color) pairs (default None)
      - highlightBonds: list of bonds to highlight (default [])
      - highlightColor: RGB color as tuple (default [1, 0, 0])
      - legend: a caption drawn near the bottom of the image (default '')

      NOTE:

            use 'matplotlib.colors.to_rgb()' to convert string and
            HTML color codes into the RGB tuple representation, eg.

              from matplotlib.colors import ColorConverter
              img = Draw.MolToImage(m, highlightAtoms=[1,2], highlightColor=ColorConverter().to_rgb('aqua'))
              img.save("molecule.png")

    RETURNS:

      a PIL Image object
  """
  if not mol:
    raise ValueError('Null molecule provided')
  if canvas is None:
    img, canvas = _createCanvas(size)
  else:
    img = None
  if options is None:
    options = DrawingOptions()
  if fitImage:
    # Scale the drawing so the molecule fills the requested image size.
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  # Consume drawing-only keyword arguments before they reach AddMol().
  if 'highlightColor' in kwargs:
    options.selectColor = kwargs.pop('highlightColor')
  legend = kwargs.pop('legend', '')
  drawer = MolDrawing(canvas=canvas, drawingOptions=options)
  if kekulize:
    from rdkit import Chem
    # Kekulize a copy so the caller's molecule is left untouched.
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    # No 2D coordinates yet; compute a depiction.
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.AddMol(mol, **kwargs)
  if legend:
    from rdkit.Chem.Draw.MolDrawing import Font
    pos = size[0] / 2, int(.94 * size[1]), 0  # the 0.94 is extremely empirical
    font = Font(face='sans', size=12)
    canvas.addCanvasText(legend, pos, font)
  if kwargs.get('returnCanvas', False):
    # Caller wants to keep drawing; do not flush yet.
    return img, canvas, drawer
  else:
    canvas.flush()
    return img
def MolToFile(mol, fileName, size=(300, 300), kekulize=True, wedgeBonds=True, imageType=None,
              fitImage=False, options=None, **kwargs):
  """ Generates a drawing of a molecule and writes it to a file

  If imageType is not provided it is inferred from the file extension.
  """
  # original contribution from Uwe Hoffmann
  if not fileName:
    raise ValueError('no fileName provided')
  if not mol:
    raise ValueError('Null molecule provided')
  if imageType is None:
    # Infer the output format from the file extension.
    imageType = os.path.splitext(fileName)[1][1:]
  if options is None:
    options = DrawingOptions()
  useAGG, useCairo, Canvas = _getCanvas()
  if fitImage:
    # Scale the drawing so the molecule fills the requested image size.
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  if useCairo or useAGG:
    canvas = Canvas(size=size, imageType=imageType, fileName=fileName)
  else:
    options.radicalSymbol = '.' #<- the sping canvas doesn't support unicode well
    canvas = Canvas(size=size, name=fileName, imageType=imageType)
  drawer = MolDrawing(canvas=canvas, drawingOptions=options)
  if kekulize:
    from rdkit import Chem
    # Kekulize a copy so the caller's molecule is not modified.
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    # No 2D coordinates yet; compute a depiction.
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.AddMol(mol, **kwargs)
  # cairo/agg canvases flush to the file; sping needs an explicit save.
  if useCairo or useAGG:
    canvas.flush()
  else:
    canvas.save()
def MolToImageFile(mol, filename, size=(300, 300), kekulize=True, wedgeBonds=True, **kwargs):
  """ DEPRECATED: please use MolToFile instead
  """
  # Thin backwards-compatibility wrapper: render to a PIL image, then save.
  img = MolToImage(mol, size=size, kekulize=kekulize, wedgeBonds=wedgeBonds, **kwargs)
  img.save(filename)
# Module-level Tk singletons reused across ShowMol() calls so that repeated
# invocations update the same window instead of opening new ones.
tkRoot = None
tkLabel = None
tkPI = None
def ShowMol(mol, size=(300, 300), kekulize=True, wedgeBonds=True, title='RDKit Molecule', **kwargs):
  """ Generates a picture of a molecule and displays it in a Tkinter window
  """
  global tkRoot, tkLabel, tkPI
  try:
    import Tkinter
  except ImportError:
    import tkinter as Tkinter
  try:
    import ImageTk
  except ImportError:
    from PIL import ImageTk
  img = MolToImage(mol, size, kekulize, wedgeBonds, **kwargs)
  if not tkRoot:
    # First call: build the window and the image label.
    tkRoot = Tkinter.Tk()
    tkRoot.title(title)
    tkPI = ImageTk.PhotoImage(img)
    tkLabel = Tkinter.Label(tkRoot, image=tkPI)
    tkLabel.place(x=0, y=0, width=img.size[0], height=img.size[1])
  else:
    # Subsequent calls: paste the new rendering into the existing window.
    tkPI.paste(img)
  # Resize the window to fit the (possibly new) image dimensions.
  tkRoot.geometry('%dx%d' % (img.size))
def MolToMPL(mol, size=(300, 300), kekulize=True, wedgeBonds=True, imageType=None, fitImage=False,
options=None, **kwargs):
""" Generates a drawing of a molecule on a matplotlib canvas
"""
if not mol:
raise ValueError('Null molecule provided')
from rdkit.Chem.Draw.mplCanvas import Canvas
c | anvas = Canvas(size)
if opt | ions is None:
options = DrawingOptions()
options.bgColor = None
if fitImage:
drawingOptions.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds = wedgeBonds
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
omol = mol
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.AddMol(mol, **kwargs)
omol._atomPs = drawer.atomPs[mol]
for k, v in iteritems(omol._atomPs):
omol._atomPs[k] = canvas.rescalePt(v)
canvas._figure.set_size_inches(float(size[0]) / 100, float(size[1]) / 100)
return canvas._figure
def calcAtomGaussians(mol, a=0.03, step=0.02, weights=None):
"""
useful things to do with these:
fig.axes[0].imshow(z,cmap=cm.gray,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
fig.axes[0].contour(x,y,z,20,colors='k')
fig=Draw.MolToMPL(m);
contribs=Crippen.rdMolDescriptors._CalcCrippenContribs(m)
logps,mrs=zip(*contribs)
x,y,z=Draw.calcAtomGaussians(m,0.03,step=0.01,weights=logps)
fig.axes[0].imshow(z,cmap=cm.jet,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
fig.axes[0].contour(x,y,z,20,colors='k',alpha=0.5)
fig.savefig('coumlogps.colored.png',bbox_inches='tight')
"""
import numpy
from matplotlib import mlab
x |
jeffreychan637/fb-cal-tpa-production | app/server/wix_verifications.py | Python | bsd-2-clause | 1,411 | 0.002126 | """This file handles parsing the Wix Instance which is key to the security of
this app.
"""
from os import environ
from base64 import urlsafe_b64encode, urlsafe_b64decode
from hmac import new
from hashlib import sha256
from json import loads
if "HEROKU" in environ:
wix_secret = environ["wix_secret"]
else:
from secrets import wix_keys
wix_secret = wix_keys["secret"]
__author__ = "Jeffrey Chan"
def instance_parser(instance):
"""This function | parses the Wix instance that comes with every call to t | he
server. If the parse is successful (the instance is from Wix and the
permission is set to owner), the call is from a valid source and
the request it came with should be performed. The function returns the
parsed instance on success and false otherwise.
"""
try:
signature, encoded_json = instance.split(".", 2)
encoded_json_with_padding = encoded_json + ('=' * (4 - (len(encoded_json) % 4)))
parsed_instance = urlsafe_b64decode(
encoded_json_with_padding.encode("utf-8"))
hmac_hashed = new(wix_secret, msg=encoded_json,
digestmod=sha256).digest()
new_signature = urlsafe_b64encode(hmac_hashed).replace("=", "")
if (new_signature == signature):
return loads(parsed_instance)
else:
return False
except Exception:
return False
|
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/special/add_newdocs.py | Python | mit | 189,015 | 0.001508 | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
# Registry mapping "<module>.<ufunc>" -> docstring text.  It is populated by
# the add_newdoc() calls below and consumed by generate_ufuncs.py at compile
# time to attach docstrings to the generated C-level ufuncs.
docdict = {}


def get(name):
    """Return the registered docstring for *name*, or None if absent."""
    try:
        return docdict[name]
    except KeyError:
        return None


def add_newdoc(place, name, doc):
    """Register *doc* under the key "<place>.<name>"."""
    key = '%s.%s' % (place, name)
    docdict[key] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
http://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
Returns
-------
omega : ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
""")
add_newdoc("scipy.special", "airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
a | irye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
| http://www.netlib.org/cephes/index.html
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/.org/amos/
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number o |
Arcbot-Org/Arcbot | bolt/discord/permissions.py | Python | gpl-3.0 | 2,442 | 0.00041 | """
This module provides an enum containing permission flags as per discord API
This module provides a class for talking about permissions within the bot
Contributors:
- Patrick Hennessy
"""
from enum import Enum
class PermissionFlag(Enum):
    # Discord permission bitfield values, one bit per capability, exactly
    # as defined by the Discord API documentation.  Guild-management bits:
    CREATE_INSTANT_INVITE = 0x00000001
    KICK_MEMBERS = 0x00000002
    BAN_MEMBERS = 0x00000004
    ADMINISTRATOR = 0x00000008
    MANAGE_CHANNELS = 0x00000010
    MANAGE_GUILD = 0x00000020
    ADD_REACTIONS = 0x00000040
    VIEW_AUDIT_LOG = 0x00000080
    # Text-channel bits:
    READ_MESSAGES = 0x00000400
    SEND_MESSAGES = 0x00000800
    SEND_TTS_MESSAGES = 0x00001000
    MANAGE_MESSAGES = 0x00002000
    EMBED_LINKS = 0x00004000
    ATTACH_FILES = 0x00008000
    READ_MESSAGE_HISTORY = 0x00010000
    MENTION_EVERYONE = 0x00020000
    USE_EXTERNAL_EMOJIS = 0x00040000
    # Voice-channel bits:
    CONNECT = 0x00100000
    SPEAK = 0x00200000
    MUTE_MEMBERS = 0x00400000
    DEAFEN_MEMBERS = 0x00800000
    MOVE_MEMBERS = 0x01000000
    USE_VAD = 0x02000000
    # Miscellaneous management bits:
    CHANGE_NICKNAME = 0x04000000
    MANAGE_NICKNAMES = 0x08000000
    MANAGE_ROLES = 0x10000000
    MANAGE_WEBHOOKS = 0x20000000
    MANAGE_EMOJIS = 0x40000000
class Permission():
    """Wraps a Discord permission bitfield and offers set-like helpers.

    ``value`` may be either a raw integer bitfield or a list of
    :class:`PermissionFlag` names to OR together.
    """
    def __init__(self, value):
        if isinstance(value, list):
            bits = 0
            for flag_name in value:
                bits |= self._get_permission_value(flag_name)
            self.bitarray = bits
        else:
            self.bitarray = value

    def __contains__(self, item):
        # A list means "every one of these flags must be allowed".
        if isinstance(item, list):
            return all(self.allows(name) for name in item)
        return self.allows(item)

    def __get__(self):
        return self.bitarray

    def __repr__(self):
        return "{}({})".format(type(self).__name__, self.bitarray)

    def allows(self, permission_name):
        """True when the named flag's bit is set."""
        mask = self._get_permission_value(permission_name)
        return bool(self.bitarray & mask)

    def add(self, permission_name):
        """Set the named flag's bit."""
        self.bitarray = self.bitarray | self._get_permission_value(permission_name)

    def remove(self, permission_name):
        """Clear the named flag's bit."""
        self.bitarray = self.bitarray & ~self._get_permission_value(permission_name)

    @staticmethod
    def _get_permission_value(name):
        # Look the flag up on the enum; fall back to 0 for a falsy value.
        return getattr(PermissionFlag, name).value or 0x00000000

    def to_list(self):
        """Return the names of every flag currently allowed."""
        names = []
        for flag in PermissionFlag:
            if self.allows(flag.name):
                names.append(flag.name)
        return names
|
mcallaghan/tmv | BasicBrowser/scoping/management/commands/check_query_file.py | Python | gpl-3.0 | 1,107 | 0.004517 | from django.core.management.base import BaseCommand, CommandError
from django | .core import management
from django.db.models import Count
from scoping.models import *
class Command(B | aseCommand):
help = 'check a query file - how many records'
def add_arguments(self, parser):
parser.add_argument('qid',type=int)
def handle(self, *args, **options):
qid = options['qid']
q = Query.objects.get(pk=qid)
p = 'TY - '
if q.query_file.name is not '':
fpath = q.query_file.path
else:
if q.database=="scopus":
fname = 's_results.txt'
else:
fname = 'results.txt'
fpath = f'{settings.QUERY_DIR}/{qid}/{fname}'
with open(fpath, 'r') as f:
c = f.read().count(p)
print('\n{} documents in downloaded file\n'.format(c))
if q.doc_set.count() > 0:
yts = q.doc_set.values('PY').annotate(
n = Count('pk')
)
for y in yts:
print('{} documents in {}'.format(y['n'],y['PY']))
|
koduj-z-klasa/python101 | docs/mcpi/figury/mcsim-2dKzK.py | Python | mit | 1,853 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import local.minecraft as minecraft # import modułu minecraft
import local.block as block # import modułu block
os.environ["USERNAME"] = "Steve" # nazwa użytkownika
os.environ["COMPUTERNAME"] = "mykomp" # nazwa komputera
# utworzenie połaczenia z symulatorem
mc = minecraft.Minecraft.create("")
def plac(x, y, z, roz=10, gracz=False):
    """Clear a cubic area starting at (x, y, z) and lay a stone floor.

    Fills a roz x roz x roz volume with air above a stone slab and
    optionally teleports the player into the middle of the cleared space.

    Parameters: x, y, z - start position, roz - edge length,
    gracz - whether to place the player in the centre.
    Requires the global ``mc`` connection and the ``block`` module.
    """
    # Stone floor one level below the cleared volume.
    mc.setBlocks(x, y - 1, z, x + roz, y - 1, z + roz, block.STONE)
    # Hollow out the cube itself with air.
    mc.setBlocks(x, y, z, x + roz, y + roz, z + roz, block.AIR)
    if gracz:
        # Drop the player in the centre of the cleared cube.
        mc.player.setPos(x + roz / 2, y + roz / 2, z + roz / 2)
def main():
mc.postToChat("Czesc! Tak dziala MC chat!") # wysłanie komunikatu do mc
plac(-15, 0, -15, 30)
import local.minecraft | stuff as mcstuff
mcfig = mcstuff.MinecraftDrawing(mc)
mcfig.drawLine(-14, 0, -14, -14, 0, 14, block.LEAVES)
mcfig.drawLine(-14, 0, 0, -7, 0, 14, block.LEAVES)
mcfig.drawLine(-14, 0, 0, -7, 0, -14, block.LEAVES)
mcfig.drawLine(-5, 0, 0, 5, 0, 0, block.LEAVES)
mcfig.drawLine(5, 0, 0, -5, 0, 14, block.LEAVES)
mcfig.drawLine(-5, 0, 14, 5, 0, 1 | 4, block.LEAVES)
mcfig.drawLine(7, 0, -14, 7, 0, 14, block.LEAVES)
mcfig.drawLine(7, 0, 0, 14, 0, 14, block.LEAVES)
mcfig.drawLine(7, 0, 0, 14, 0, -14, block.LEAVES)
return 0
if __name__ == '__main__':
print "Uruchamianie... Proszę czekać..."
main()
|
maszczyn/shim | tests/test_system_load_monitor.py | Python | mit | 1,207 | 0.001657 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import mock
from shim import SystemLoadMonitor, Monito | rCheckFailure
class TestSystemLoadMonitor(unittest.TestCase):
def setUp(self):
self.monitor_id = "monitor_id"
self.monitor_options = {
'upper_threshold': 1.0,
}
self.monitor = self._create_system_load_monitor()
def _create_system_load_monitor(self):
return SystemLoadMonitor(self.monitor_id, **self.monitor_options)
def test_name_includes_monitors_generic_name(self):
self.assertI | n("SystemLoadMonitor", self.monitor.name)
@mock.patch("shim.Monitor._get_raw_value")
def test_get_current_value_returns_the_load_if_command_returned_properly_formatted_data(self, raw_value_patch):
raw_value_patch.return_value = "0.18 0.29 0.31 1/345 16151"
self.assertEqual(self.monitor.get_current_value(), 0.31)
@mock.patch("shim.Monitor._get_raw_value")
def test_get_current_value_raises_MonitorCheckFailure_if_command_returned_malformed_data(self, raw_value_patch):
raw_value_patch.return_value = "Bad data."
self.assertRaises(MonitorCheckFailure, self.monitor.get_current_value)
|
GabMus/lithium-projman | server/projman/models.py | Python | gpl-3.0 | 3,701 | 0.037287 | # This file is part of Lithium-Projman.
#
# Lithium-Projman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lithium-Projman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lithium-Projman. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
import os
class ProjmanUser(models.Model): # inherits Django default user class
    """Profile model extending Django's built-in User with an avatar."""
    user = models.OneToOneField(User, on_delete = models.CASCADE, null = False)
    avatar= models.ImageField(upload_to='uploads/photos/%Y/%m/%d', blank = True, null = True)
    # post_save hook: automatically create a profile whenever a new Django
    # User is saved.  NOTE: defined inside the class body and connected at
    # class-creation time, so it is registered exactly once.
    def create_user(sender, instance, created, **kwargs):
        if created:
            profile, created = ProjmanUser.objects.get_or_create(user = instance)
    post_save.connect(create_user, sender = User)
    def __str__(self):
        return self.user.username
class Project(models.Model):
    """A project owned by a user; container for todos and notes."""
    # id is already defined as default in django
    author = models.ForeignKey('ProjmanUser', null = False)
    date_time = models.DateTimeField(auto_now_add = True)
    name = models.CharField(max_length=250, blank = False)
    description = models.TextField(blank = True)
    def __str__(self):
        return self.name
class To_do(models.Model):
    """A todo item belonging to a project."""
    # id is already defined as default in django
    author = models.ForeignKey('ProjmanUser', null = False)
    parent_project = models.ForeignKey('Project', null = False)
    title = models.TextField(blank = False)
    details = models.TextField(blank = True)
    # Completion flag toggled from the UI.
    done = models.BooleanField(default = False)
    date_time = models.DateTimeField(auto_now_add = True)
    def __str__(self):
        return self.author.user.username+" "+self.title
class Comment_todo(models.Model):
# id is already defined as default in django
author = models.ForeignKey('ProjmanUser', null = False)
parent_todo = models.ForeignKey('To_do', null = False)
content = models.TextField(blank = Fal | se)
date_time = m | odels.DateTimeField(auto_now_add = True)
class Note(models.Model):
    """A free-form note attached to a project; may be pinned."""
    # id is already defined as default in django
    author = models.ForeignKey('ProjmanUser', null = False)
    parent_project = models.ForeignKey('Project', null = False)
    title = models.TextField(blank = False)
    content = models.TextField(blank = True)
    # Pinned notes are surfaced first in the UI.
    pinned = models.BooleanField(default = False)
    date_time = models.DateTimeField(auto_now_add = True)
class Comment_note(models.Model):
    """A user comment attached to a Note."""
    # id is already defined as default in django
    author = models.ForeignKey('ProjmanUser', null = False)
    parent_note = models.ForeignKey('Note', null = False)
    content = models.TextField(blank = False)
    date_time = models.DateTimeField(auto_now_add = True)
class Participation(models.Model):
    """Membership link: a user participating in a project."""
    user = models.ForeignKey('ProjmanUser', null = False)
    project = models.ForeignKey('Project', null = False)
    def __str__(self):
        return self.user.user.username + " -> " + self.project.name
class Designation(models.Model):
    """Assignment link: a user designated to work on a todo item."""
    user = models.ForeignKey('ProjmanUser', null = False)
    todo = models.ForeignKey('To_do', null = False)
    def __str__(self):
        return self.user.user.username + " -> " + self.todo.title
class Projcode(models.Model):
    """Source-code snippet stored alongside a project."""
    project = models.ForeignKey('Project', null = False)
    code = models.TextField(null = False, blank = False)
    def __str__(self):
        return self.project.name
|
ex-nerd/health-stats | health_stats/dataset.py | Python | mit | 1,067 | 0.000937 |
from collections import OrderedDict
class DataSet(object):
__slots__ = (
'events', # List of all events in this data set
'group', # Iterable containing groups of events
)
def __init__(self, query, group_function):
self.events = query.all()
if group_function is None:
self.group = self.events
elif callable(group_function):
self.group = OrderedDict()
for event in self.events:
# Add this event to the group-by entries
key = group_function(event)
if key not in self.group:
self.group[key] = []
s | elf.group[key].append(event)
else:
raise ValueError("group_function is not callable")
def __pretty__(self, p, cycle):
p.text('<{0}: '.format(type(self) | .__name__))
if cycle:
p.text('...')
else:
p.pretty({
'events': self.events,
'group': self.group.keys(),
})
p.text('>')
|
nanchenchen/emoticon-analysis | emoticonvis/apps/corpus/models.py | Python | mit | 5,332 | 0.003188 | import operator
from django.db import models
from django.db.models import Q
from django.db.models import Count
from caching.base import CachingManager, CachingMixin
from emoticonvis.apps.base import models as base_models
from emoticonvis.apps.corpus import utils
import numpy
class Dataset(models.Model):
    """A top-level dataset object containing messages."""
    name = models.CharField(max_length=150)
    """The name of the dataset"""
    description = models.TextField()
    """A description of the dataset."""
    created_at = models.DateTimeField(auto_now_add=True)
    """The :py:class:`datetime.datetime` when the dataset was created."""
    start_time = models.DateTimeField(null=True, default=None, blank=True)
    """The time of the first real message in the dataset"""
    end_time = models.DateTimeField(null=True, default=None, blank=True)
    """The time of the last real message in the dataset"""

    @property
    def message_count(self):
        """Number of messages attached to this dataset."""
        return self.message_set.count()

    def __unicode__(self):
        return self.name

    def get_messages_from_selected_participants(self):
        """Distinct messages posted by participants flagged ``is_selected``."""
        # Fix: Message.dataset declares no related_name, so the reverse
        # accessor is ``message_set`` (as message_count above already uses);
        # ``self.messages`` raised AttributeError at runtime.
        return self.message_set.filter(participant__is_selected=True).distinct()

    def get_emoticons_from_selected_participants(self):
        """Distinct emoticons used in messages of selected participants."""
        return Emoticon.objects.filter(messages__participant__is_selected=True).distinct()
class Emoticon(models.Model):
    """An emoticon observed in messages, with an assigned valence."""
    text = base_models.Utf8CharField(max_length=200)
    """The text of the emoticon"""
    # Sentiment classification of the emoticon; 'U' until annotated.
    VALENCE_CHOICES = (
        ('P', 'Positive'),
        ('N', 'Negative'),
        ('O', 'Neutral'),
        ('U', 'Unknown'),
    )
    valence = models.CharField(max_length=1, choices=VALENCE_CHOICES, default='U')
    def __repr__(self):
        return self.text
    def __unicode__(self):
        return self.__repr__()
class Participant(models.Model):
    """A participant (chat user) belonging to a dataset."""
    dataset = models.ForeignKey(Dataset, default=1)
    """Which :class:`Dataset` the participant belongs to"""
    name = models.CharField(max_length=100, blank=True)
    """The name of the participant"""
    # Declared primary language of the participant.
    LANG_CHOICES = (
        ('No', 'Not specified'),
        ('En', 'English'),
        ('Fr', 'French'),
    )
    language = models.CharField(max_length=2, choices=LANG_CHOICES, default='No')
    # Seniority status within the group.
    STATUS_CHOICES = (
        ('No', 'Not specified'),
        ('Jr', 'Junior'),
        ('Sr', 'Senior'),
    )
    status = models.CharField(max_length=2, choices=STATUS_CHOICES, default='No')
    position = models.CharField(max_length=32, default=None, null=True)
    # Whether this participant is included in analyses.
    is_selected = models.BooleanField(default=True)

    def __repr__(self):
        # Fix: Participant has no ``text`` field (the old body was copied
        # from Emoticon and raised AttributeError); represent by name.
        return self.name
    def __unicode__(self):
        return self.__repr__()
class LanguageSession(models.Model):
    """
    A language session is a continuous time period when participants in the session stay the same
    """
    dataset = models.ForeignKey(Dataset)
    """Which :class:`Dataset` the session belongs to"""
    start_time = models.DateTimeField(null=True, blank=True, default=None)
    """The :py:class:`datetime.datetime` (in UTC) when the language session starts"""
    end_time = models.DateTimeField(null=True, blank=True, default=None)
    """The :py:class:`datetime.datetime` (in UTC) when the language session ends"""
    participants = models.ManyToManyField(Participant, related_name="lang_sessions")
    # Message counts per language within the session.
    num_en = models.IntegerField(default=0)
    num_fr = models.IntegerField(default=0)
    # Fraction of English messages (num_en / total).
    en_proportion = models.FloatField(default=0)
    # Coarse classification of the session's language mix.
    TYPE_CHOICES = (
        ('E only', 'E only'),
        ('major E', 'major E'),
        ('major F', 'major F'),
        ('F only', 'F only'),
        ('Empty', 'Empty')
    )
    type = models.CharField(max_length=8, choices=TYPE_CHOICES, default=None, null=True)
class Message(models.Model):
    """
    The Message is the central data entity for the dataset.
    """
    dataset = models.ForeignKey(Dataset)
    """Which :class:`Dataset` the message belongs to"""
    idx = models.IntegerField(null=True, blank=True, default=None)
    """The index of the message"""
    time = models.DateTimeField(null=True, blank=True, default=None)
    """The :py:class:`datetime.datetime` (in UTC) when the message was sent"""
    session_id = models.IntegerField(null=True, blank=True, default=None)
    """The session of the message"""
    TYPE_CHOICES = (
        (0, 'Normal message'),
        (1, 'Someone joined'),
        (2, 'Someone left'),
        (3, 'Bert message'),
        (4, 'Starting log'),
    )
    # FIX: ``max_length`` is not a valid option for IntegerField (Django
    # ignores it and flags it with system check fields.W122), so it was
    # removed; behavior of the stored column is unchanged.
    type = models.IntegerField(choices=TYPE_CHOICES, default=0)
    # Sender; nullable for system-generated entries (joins/leaves/log starts).
    participant = models.ForeignKey(Participant, related_name="messages", default=None, null=True)
    text = base_models.Utf8TextField(null=True, blank=True, default="")
    """The actual text of the message."""
    emoticons = models.ManyToManyField(Emoticon, related_name="messages")
    # Language session this message falls into, when assigned.
    lang_session = models.ForeignKey(LanguageSession, related_name="messages", default=None, null=True)
    LANG_CHOICES = (
        ('No', 'Not specified'),
        ('En', 'English'),
        ('Fr', 'French'),
    )
    # Automatically detected language of ``text`` (see LANG_CHOICES).
    detected_language = models.CharField(max_length=2, choices=LANG_CHOICES, default='No')

    def __repr__(self):
        return self.text

    def __unicode__(self):
        return self.__repr__()
|
mudyc/libvob-c | py/setup.py | Python | gpl-2.0 | 760 | 0.028947 | from distutils.core import setup, Extension
import os
import popen2
line = popen2.popen2('pkg-config --cflags glib-2.0')[0].readline()
print line
inc_dirs = ['../src/']
for s in line.split(" "):
if s.startswith('-I'):
inc_dirs.append(s[2:])
#print inc_dirs
module1 = Extension('libvob',
sources = ['libvob.c'],
include_dirs = inc_dirs,
library_dirs = ['../'],
libraries = [' | vob-c', 'glib-2.0', 'mcheck'],
runtime_library_dirs = [os.path.abspath('. | .')],
)
setup (name = 'PyLibvob',
version = '0.0.1',
description = 'A python bindings for Libvob-c',
#ext_package='libvob',
ext_modules = [module1])
|
Sashkiv/sneakers_shop | sneakers_shop/pkg/main/urls.py | Python | gpl-3.0 | 382 | 0 | from django.conf.urls import url
from sneakers_shop.pkg.main.views import IndexView, AboutView, ContactsView, \
CatalogView, DeliveryView
urlpatterns = [
url(r'^$', IndexView.as_view()),
url(r'^about/', AboutView.as_view()),
url(r'^contacts/', ContactsView.as_view()),
url(r'^c | atalog/', CatalogView.as_view()),
url(r'^delivery/', D | eliveryView.as_view()),
]
|
asoplata/dynasim-benchmark-brette-2007 | output/Brian2/brian2_benchmark_CUBA_nosyn_250/brian2_benchmark_CUBA_nosyn_250.py | Python | gpl-3.0 | 2,003 | 0.003498 | """
# Notes:
- This simulation seeks to emulate the CUBA benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation | does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008).
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian_benchmark_CUBA_nosyn_250/pbsout/brian_benchmark_CUBA_nosyn_250.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.y | ale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *

# Simulation parameters.
cells = 250                   # number of neurons in the population
defaultclock.dt = 0.01*ms     # integration time step
taum=20*ms                    # membrane time constant
Vt = -50*mV                   # spike threshold
Vr = -60*mV                   # reset potential after a spike
El = -49*mV                   # leak reversal potential

# The model: leaky integrate-and-fire neuron with no synaptic input
# (synapses are intentionally omitted; see the module docstring).
eqs = Equations('''
dv/dt = ((v-El))/taum : volt
''')

P = NeuronGroup(cells, model=eqs,threshold="v>Vt",reset="v=Vr",refractory=5*ms,
                method='euler')

# Split the population 80/20 into "excitatory" and "inhibitory" subgroups,
# mirroring the CUBA benchmark layout (unused further without synapses).
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]

# Initialization: start every neuron at the reset potential.
P.v = Vr

# Record a few traces (and the full population for optional export below).
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')

# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
|
google-research/graph-attribution | tests/test_training.py | Python | apache-2.0 | 3,709 | 0.003235 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for training GNN models."""
import numpy as np
import sonnet.v2 as snt
import tensorflow.compat.v2 as tf
from absl.testing import absltest, parameterized
from graph_attribution import experiments, featurization
from graph_attribution import graphnet_models as gnn_models
from graph_attribution import graphs as graph_utils
from graph_attribution import templates, training
class TrainingTests(parameterized.TestCase):
    """Basic tests for training a model."""

    def _setup_graphs_labels(self, n_graphs):
        """Setup graphs and labels for a binary classification learning task."""
        tensorizer = featurization.MolTensorizer()
        smiles_pool = ['CO', 'CCC', 'CN1C=NC2=C1C(=O)N(C(=O)N2C)C', 'CCCO']
        smiles = np.random.choice(smiles_pool, n_graphs)
        graphs = graph_utils.smiles_to_graphs_tuple(smiles, tensorizer)
        # With a single graph the task is node-level (one label per node);
        # otherwise it is graph-level (one label per graph).
        n_labels = len(graphs.nodes) if n_graphs == 1 else n_graphs
        labels = np.random.choice([0, 1], n_labels).reshape(-1, 1)
        return graphs, labels

    def _setup_model(self, n_graphs):
        # Target type mirrors _setup_graphs_labels: a single graph implies
        # node-level targets, several graphs imply graph (globals) targets.
        target_type = templates.TargetType.globals if n_graphs > 1 else templates.TargetType.nodes
        model = experiments.GNN(10, 10, 10, 1, gnn_models.BlockType('gcn'), 'relu',
                                target_type, 3)
        return model

    @parameterized.named_parameters(('constant', 1024, 256, 4),
                                    ('droplast', 1000, 256, 3))
    def test_get_batch_indices(self, n, batch_size, expected_n_batches):
        # Indices must be grouped into full batches, dropping the remainder
        # (the 'droplast' case: 1000 // 256 == 3).
        batch_indices = training.get_batch_indices(n, batch_size)
        self.assertEqual(batch_indices.shape, (expected_n_batches, batch_size))

    @parameterized.parameters([0.2, 1.0])
    def test_augment_binary_task(self, fraction):
        """Check that data augmention sizes are correct."""
        initial_n = 10
        x, y = self._setup_graphs_labels(initial_n)
        # Zero feature vectors used by the augmentation as null node/edge features.
        node_vec = np.zeros_like(x.nodes[0])
        edge_vec = np.zeros_like(x.edges[0])
        initial_positive = int(np.sum(y == 1))
        # The augmentation adds aug_n examples twice over (factor of 2 below);
        # the assertions further down confirm half of them are positive.
        aug_n = int(np.floor(fraction * initial_positive))
        expected_n = initial_n + aug_n * 2
        x_aug, y_aug = training.augment_binary_task(x, y, node_vec, edge_vec,
                                                    fraction)
        self.assertEqual(graph_utils.get_num_graphs(x_aug), expected_n)
        self.assertLen(y_aug, expected_n)
        # Make sure half of the augmented examples are positive labels.
        aug_positive = np.sum(y_aug == 1) - initial_positive
        self.assertEqual(aug_positive, aug_n)

    @parameterized.named_parameters(('onegraph', 1),
                                    ('minibatch', 25))
    def test_make_tf_opt_epoch_fn(self, batch_size):
        """Make sure tf-optimized epoch gives a valid loss."""
        x, y = self._setup_graphs_labels(batch_size)
        model = self._setup_model(batch_size)
        opt = snt.optimizers.Adam()
        loss_fn = tf.keras.losses.BinaryCrossentropy()
        opt_fn = training.make_tf_opt_epoch_fn(x, y, batch_size, model, opt,
                                               loss_fn)
        # One optimization epoch should produce a finite scalar loss.
        loss = opt_fn(x, y).numpy()
        self.assertTrue(np.isfinite(loss))
if __name__ == '__main__':
tf.config.experimental_run_functions_eagerly(True)
absltest.main()
|
airbnb/airflow | tests/providers/apache/hive/sensors/test_hive_partition.py | Python | apache-2.0 | 1,659 | 0.002411 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from unittest.mock import patch
from airflow.providers.apache.hive.sensors.hive_partition import HivePartitionSensor
from tests.providers.apache.hive import DEFAULT_DATE, TestHiveEnvironment
from tests.test_utils.mock_hooks import MockHiveMetastoreHook
# Integration test: requires a real Hive environment, so it only runs when
# AIRFLOW_RUNALL_TESTS is set; the metastore hook is replaced with a mock
# for every test method (the class-level @patch injects it as an argument).
@unittest.skipIf('AIRFLOW_RUNALL_TESTS' not in os.environ, "Skipped because AIRFLOW_RUNALL_TESTS is not set")
@patch(
    'airflow.providers.apache.hive.sensors.hive_partition.HiveMetastoreHook',
    side_effect=MockHiveMetastoreHook,
)
class TestHivePartitionSensor(TestHiveEnvironment):
    def test_hive_partition_sensor(self, mock_hive_metastore_hook):
        """Smoke-test: the sensor pokes a partitioned table end to end."""
        op = HivePartitionSensor(
            task_id='hive_partition_check', table='airflow.static_babynames_partitioned', dag=self.dag
        )
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
|
emirot/codefights | python/simpleSort.py | Python | apache-2.0 | 248 | 0 | def simpleSort(arr):
n = len(arr)
for i in range | (n):
j = 0
stop = n - | i
while j < stop - 1:
if arr[j] > arr[j + 1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
j += 1
return arr
|
cemmanouilidis/naturalscrolling | naturalscrolling/indicator.py | Python | gpl-3.0 | 3,094 | 0.000646 | ### BEGIN LICENSE
# Copyright (C) 2011 Guillaume Hain <zedtux@zedroot.org>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along
# with this program. If not | , see <http://www.gnu.org/licenses/>
### END LICENSE
import gtk
import appindicator
from naturalscrolling_lib import naturalscrollingconfig
from naturalscrolling_lib.gconfsettings impor | t GConfSettings
from naturalscrolling_lib.udevobservator import UDevObservator
from naturalscrolling.indicatormenu import IndicatorMenu
class Indicator(object):
    """Singleton wrapper around the natural-scrolling AppIndicator.

    Builds the indicator icon and menu, wires them to the UDev observator
    and GConf settings, and exposes helpers to reflect the scrolling state.
    """

    # Singleton bookkeeping shared by every construction attempt.
    _instance = None
    _init_done = False

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # FIX: object.__new__ must not be forwarded the constructor
            # arguments (DeprecationWarning on Python 2.6+, TypeError on 3).
            cls._instance = super(Indicator, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # FIX: _init_done existed but was never consulted, so every
        # Indicator() call re-built the menu and restarted the UDev
        # observator on the shared singleton instance.
        if self._init_done:
            return
        Indicator._init_done = True

        # Initialize a new AppIndicator
        self.indicator = appindicator.Indicator(
            "natural-scrolling-indicator",
            "natural-scrolling-status-not-activated",
            appindicator.CATEGORY_APPLICATION_STATUS)
        media_path = "%s/media/" % naturalscrollingconfig.get_data_path()
        self.indicator.set_icon_theme_path(media_path)
        self.indicator.set_attention_icon(
            "natural-scrolling-status-activated")

        menu = IndicatorMenu()
        self.indicator.set_menu(menu)

        # Initialize the UDev client
        udev_observator = UDevObservator()
        udev_observator.on_update_execute(menu.refresh)
        udev_observator.start()

        # Force the first refresh of the menu in order to populate it.
        menu.refresh(udev_observator.gather_devices())

        # When something changes in GConf, push it to the Indicator menu
        # in order to update the status of the device as checked or unchecked
        GConfSettings().server().on_update_fire(menu.update_check_menu_item)

        # Initialize GConf in order to be up-to-date with existing devices
        GConfSettings().initialize(udev_observator.gather_devices())

    def status_attention(self):
        """Show the 'attention' (natural scrolling active) icon."""
        # FIX: Indicator defines no set_status of its own; the call must be
        # delegated to the wrapped appindicator (as check_scrolling does).
        self.indicator.set_status(appindicator.STATUS_ATTENTION)

    def status_active(self):
        """Show the normal 'active' icon."""
        self.indicator.set_status(appindicator.STATUS_ACTIVE)

    def isreversed(self):
        # NOTE(review): stub -- always reports natural scrolling as enabled.
        # Confirm whether the real state should be read from settings.
        return True

    def check_scrolling(self):
        """Synchronise the indicator icon with the scrolling state."""
        if self.isreversed():
            self.indicator.set_status(appindicator.STATUS_ATTENTION)
        else:
            self.indicator.set_status(appindicator.STATUS_ACTIVE)
        return True

    def start(self):
        """Update the icon once and enter the GTK main loop."""
        self.check_scrolling()
        try:
            gtk.main()
        except KeyboardInterrupt:
            pass
|
arneck/Macropores | biopore_detect.py | Python | gpl-3.0 | 12,967 | 0.022056 | # coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as snd
import seaborn as sns
from skimage import img_as_float, morphology, measure
from skimage.color import rgb2hsv
from skimage.morphology import reconstruction
from skimage.exposure import rescale_intensity
from skimage.measure import label
from astropy.table import Table
from scipy import spatial
from skimage.filters import sobel
from skimage.feature import peak_local_max
def biop_det(fi, mp_threshold, patch_threshold, perc,px, plot=True, morph=False,testing=True):
"""
Function for detecting biopores to analyse their spatial arrangement & matrix interaction
Line 105:134 are adapted from the preprocessor of the echoRD-model by C. Jackisch.
For further informations see: https://github.com/cojacoo/echoRD_model/tree/master/echoRD
file "macropore_ini.py"
Parameters |
----------
fi : input image ('.png'-format, either as rgb or rgba image)
mp_threshold : lower limit for removing small macropores
patch_threshold : min [0] and max [1] of the desired patch
size limits (usually min=100,max=10000)
perc : value up to which percentile gray values among to biopores
(0.125 shows good results for brighter soil matrix)
px : actual length of one pixel in input image [mm | ]
plot : True/False: whether results should be plotted (default:True)
morph : if True the morphology of detected biopores will be plotted, otherwise pores are displayed as
scatterplot and distinguished whether stained or not (default)
testing : if True no distances are calculated and only the detected macropores are
plotted to reduce computing time during threshold adjustment (default),
otherwise all distances are computed
Output
------
Dictionary with following keys:
'biopores' : labeled biopores
'biopores_centroidxy' : x/y-coordinates of detected biopores
'biopores_stained_centroidxy' : x/y-coordinates of detected stained biopores
'biopores_area' : area of detected biopores (number of pixels)
'biopores_diameter' : diameter of detected biopores (diameter of circle with same area [mm])
'distance_matrix_biopore' : distance of each image pixel to nearest biopore [mm]
'distance_matrix_stained_biopore' : distance of each image to nearest stained biopore [mm]
'biopore_matrix_interaction' : distance of pixels from stained patches including at least one
biopore to nearest stained biopore [mm] (estimation of biopore-matrix interaction)
'stained_patches' : labeled blue-stained patches
'patches_with_biopores' : detected blue-stained patches including at least one biopore
'table' : summary table with number and main propertiesd of detected biopores
'stained_index' : index of stained biopores
'unstained_index' : index of unstained biopores
"""
im_raw = snd.imread(fi) # load image
sim = np.shape(im_raw)
if sim[2]==4:
imrgb=im_raw[:,:,:3]
else:
imrgb=im_raw
imhsv = rgb2hsv(imrgb) # convert RGB image to HSV color-space
img = imhsv[:,:,2] # extract value channel
im = img_as_float(imrgb) # load image as float for detection of stained patches
sim = np.shape(im) # extract dimensions of input image
# morphological reconstruction for detecting holes inside the picture (according to general example
# "filling holes and detecting peaks" from scikit-image http://scikit-image.org/docs/dev/auto_examples/features_detection/plot_holes_and_peaks.html#sphx-glr-auto-examples-features-detection-plot-holes-and-peaks-py)
seed = np.copy(img)
seed[1:-1, 1:-1] = img.max()
mask = img
filled = reconstruction(seed, mask, method='erosion')
holes=img-filled
# rescale and extract macropores
holes_resc=rescale_intensity(holes,out_range=(0.0,1))
thresh=np.percentile(holes_resc,perc)
holes_resc[holes_resc>thresh]=1
holes_resc[holes_resc<thresh]=0
bp_label=label(holes_resc,neighbors=8, background=1)
bp_label[bp_label==-1]=0
# remove objects smaller than threshold
bp_label_clean = morphology.remove_small_objects(bp_label, min_size=mp_threshold)
# detect and label blue stained patches
# calculate difference of channels to extract blue stained patches
dim=abs(im[:,:,1]-im[:,:,0])
# discard low contrasts
dim[dim<0.2]=0.0
# filter to local maxima for further segmentation
# process segmentation according to sobel function of skimage
image_max = snd.maximum_filter(dim, size=5, mode='constant')
elevation_map = sobel(dim)
markers = np.zeros_like(dim)
markers[image_max < 0.1] = 2
markers[image_max > 0.2] = 1
segmentation = morphology.watershed(elevation_map, markers)
segmentation = snd.binary_fill_holes(1-(segmentation-1))
# clean patches below theshold
patches_cleaned = morphology.remove_small_objects(segmentation, patch_threshold[0])
labeled_patches = label(patches_cleaned)
sizes = np.bincount(labeled_patches.ravel())[1:] #first entry (background) discarded
# reanalyse for large patches and break them by means of watershed segmentation
idx=np.where(sizes>patch_threshold[1])[0]+1
labeled_patches_large=labeled_patches*0
idy=np.in1d(labeled_patches,idx).reshape(np.shape(labeled_patches))
labeled_patches_large[idy]=labeled_patches[idy]
distance = snd.distance_transform_edt(labeled_patches_large)
footp=int(np.round(np.sqrt(patch_threshold[1])/100)*100)
local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((footp, footp)),labels=labeled_patches_large)
markers = snd.label(local_maxi)[0]
labels_broken_large = morphology.watershed(-distance, markers, mask=labeled_patches_large)
labeled_patches[idy]=labels_broken_large[idy]+np.max(labeled_patches)
# measure regionproperties of biopores
meas_bp=measure.regionprops(bp_label_clean, intensity_image=None)
bp_labels = np.unique(bp_label_clean)[1:]
bp_centroidx = bp_labels.astype(np.float64)
bp_centroidy = bp_labels.astype(np.float64)
bp_area = bp_labels.astype(np.float64)
bp_diameter = bp_labels.astype(np.float64)
# extract regionprops for each labeled biopore
for i in np.arange(len(bp_labels)):
bp_centroidx[i], bp_centroidy[i]=meas_bp[i]['centroid']
bp_area[i]=(meas_bp[i]['area'])
bp_diameter[i]=(meas_bp[i]['equivalent_diameter'])*px
bp_centroidxy = np.stack((bp_centroidx,bp_centroidy), axis=-1)
# extract biopores inside stained areas = "stained biopores"
stain_info=np.zeros(len(bp_centroidxy))
rbp_centroidxy=np.around(bp_centroidxy).astype(int)
for i in np.arange(len(bp_centroidxy)):
if labeled_patches[rbp_centroidxy[i,0],rbp_centroidxy[i,1]]>0:
stain_info[i]=1
else:
stain_info[i]=2
stained=np.where(stain_info==1)
unstained=np.where(stain_info==2)
# select value of stained patches including an biopore
bp_stained=np.around(bp_centroidxy[stained]).astype(int)
label_value=np.zeros(len(bp_stained)).astype(int)
for i in np.arange(len(bp_stained)):
label_value[i]=labeled_patches[bp_stained[i,0], bp_stained[i,1]]
# remove labeled patches without any biopore
label_withbp=np.copy(labeled_patches)
for i in np.arange(len(label_value)):
label_withbp[label_withbp==label_value[i]]=-1
label_withbp[label_withbp!=-1]=0
label_withbp[label_withbp==-1]=1
# distance calculations
if testing==False:
# Compute Euclidian distance for each pixel to nearest biopore
m_bp_dist = np.zeros((sim[0],sim[1]))
for i in np.arange(sim[0]):
for j in np.arange(sim[1]):
matrixp1=[i,j]
m_bp_dist[i,j]=spatial.KDTree(bp_centroidxy).query(matrixp1,p=2)[0]
# compute Euclidian distance for each pixel to nearest stained biopore
m_stbp_dist=np.zeros((sim[0],sim[1]))
for i in np.arange(sim[0]) |
LLNL/spack | var/spack/repos/builtin.mock/packages/git-test/package.py | Python | lgpl-2.1 | 396 | 0 | # Copyright 2013-2021 Lawrence Livermore National Secu | rity, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
from spack import *
class GitTest(Package):
    """Mock package that uses git for fetching."""

    homepage = "http://www.git-fetch-example.com"

    # The git URL is a placeholder; the test harness fills it in at runtime.
    version('git', git='to-be-filled-in-by-test')
|
unreal666/outwiker | src/outwiker/gui/dialogs/overwritedialog.py | Python | gpl-3.0 | 3,191 | 0.000331 | # -*- coding: utf-8 -*-
import wx
from outwiker.gui.testeddialog import TestedDialog
class OverwriteDialog(TestedDialog):
    """Modal dialog asking what to do with a file that already exists:
    overwrite or skip, either once or for all remaining files."""

    def __init__(self, *args, **kwds):
        super(OverwriteDialog, self).__init__(*args, **kwds)
        self.textLabel = wx.StaticText(self,
                                       -1,
                                       _("Overwrite file?"),
                                       style=wx.ALIGN_CENTRE)
        self.overwrite = wx.Button(self, -1, _("Overwrite"))
        self.overwriteAll = wx.Button(self, -1, _("Overwrite all"))
        self.skip = wx.Button(self, -1, _("Skip"))
        self.skipAll = wx.Button(self, -1, _("Skip all"))
        self.cancel = wx.Button(self, wx.ID_CANCEL, _("Cancel"))

        self.__set_properties()
        self.__do_layout()

        self.Bind(wx.EVT_BUTTON, self.onOverwrite, self.overwrite)
        self.Bind(wx.EVT_BUTTON, self.onOverwriteAll, self.overwriteAll)
        self.Bind(wx.EVT_BUTTON, self.onSkip, self.skip)
        self.Bind(wx.EVT_BUTTON, self.onSkipAll, self.skipAll)

        # Result codes returned from ShowDialog / ShowModal.
        self.ID_OVERWRITE = 1
        self.ID_SKIP = 2

        # Flag that remembers the user's choice so the dialog is not shown
        # again after one of the "... all" buttons was pressed.
        # (Translated from the original Russian comment.)
        self.flag = 0

        self.SetEscapeId(wx.ID_CANCEL)
        self.Center(wx.BOTH)

    def __set_properties(self):
        self.SetTitle(_("Overwrite Files"))
        self.overwrite.SetFocus()
        self.overwrite.SetDefault()

    def __do_layout(self):
        sizer_1 = wx.FlexGridSizer(cols=1)
        sizer_1.AddGrowableCol(0)
        sizer_1.AddGrowableRow(1)
        sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_1.Add(self.textLabel,
                    flag=wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL,
                    border=10)
        sizer_2.Add(self.overwrite, flag=wx.ALL, border=4)
        sizer_2.Add(self.overwriteAll, flag=wx.ALL, border=4)
        sizer_2.Add(self.skip, flag=wx.ALL, border=4)
        sizer_2.Add(self.skipAll, flag=wx.ALL, border=4)
        sizer_2.Add(self.cancel, flag=wx.ALL, border=4)
        sizer_1.Add(sizer_2,
                    flag=wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL | wx.ALL,
                    border=4)
        self.SetSizer(sizer_1)
        self.Fit()

    def ShowDialog(self, text):
        """
        Show the dialog when the user still has to be asked what to do
        with the file; call this method instead of Show/ShowModal.

        text -- message text displayed inside the dialog.

        Returns ID_OVERWRITE / ID_SKIP (from the modal dialog or from the
        remembered "... all" choice) or the cancel code from ShowModal.
        (Docstring translated from the original Russian.)
        """
        if self.flag == 0:
            self.textLabel.SetLabel(text)
            self.Layout()
            return self.ShowModal()

        return self.flag

    def onOverwrite(self, event):
        self.EndModal(self.ID_OVERWRITE)

    def onOverwriteAll(self, event):
        # Remember the choice so subsequent ShowDialog calls skip the UI.
        self.flag = self.ID_OVERWRITE
        self.EndModal(self.ID_OVERWRITE)

    def onSkip(self, event):
        self.EndModal(self.ID_SKIP)

    def onSkipAll(self, event):
        self.flag = self.ID_SKIP
        self.EndModal(self.ID_SKIP)
|
tyc85/nwsdr-3.6.3-dsc | gr-uhd/grc/gen_uhd_usrp_blocks.py | Python | gpl-3.0 | 11,243 | 0.038246 | """
Copyright 2010-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
MAIN_TMPL = """\
<?xml version="1.0"?>
<block>
<name>UHD: USRP $sourk.title()</name>
<key>uhd_usrp_$(sourk)</key>
<throttle>1</throttle>
<import>from gnuradio import uhd</import>
<make>uhd.usrp_$(sourk)(
device_addr=\$dev_addr,
stream_args=uhd.stream_args(
cpu_format="\$type",
\#if \$otw()
otw_format=\$otw,
\#end if
\#if \$stream_args()
args=\$stream_args,
\#end if
channels=range(\$nchan),
),
)
\#if \$clock_rate()
self.\$(id).set_clock_rate(\$clock_rate, uhd.ALL_MBOARDS)
\#end if
#for $m in range($max_mboards)
########################################################################
\#if \$num_mboards() > $m and \$clock_source$(m)()
self.\$(id).set_clock_source(\$clock_source$(m), $m)
\#end if
########################################################################
\#if \$num_mboards() > $m and \$time_source$(m)()
self.\$(id).set_time_source(\$time_source$(m), $m)
\#end if
########################################################################
\#if \$num_mboards() > $m and \$sd_spec$(m)()
self.\$(id).set_subdev_spec(\$sd_spec$(m), $m)
\#end if
########################################################################
#end for
\#if \$sync()
self.\$(id).set_time_unknown_pps(uhd.time_spec())
\#end if
self.\$(id).set_samp_rate(\$samp_rate)
#for $n in range($max_nchan)
\#if \$nchan() > $n
self.\$(id).set_center_freq(\$center_freq$(n), $n)
self.\$(id).set_gain(\$gain$(n), $n)
\#if \$ant$(n)()
self.\$(id).set_antenna(\$ant$(n), $n)
\#end if
\#if \$bw$(n)()
self.\$(id).set_bandwidth(\$bw$(n), $n)
\#end if
\#end if
#end for
</make>
<callback>set_samp_rate(\$samp_rate)</callback>
#for $n in range($max_nchan)
<callback>set_center_freq(\$center_freq$(n), $n)</callback>
<callback>set_gain(\$gain$(n), $n)</callback>
<callback>set_antenna(\$ant$(n), $n)</callback>
<callback>set_bandwidth(\$bw$(n), $n)</callback>
#end for
<param>
<name>$(direction.title())put Type</name>
<key>type</key>
<type>enum</type>
<option>
<name>Complex float32</name>
<key>fc32</key>
<opt>type:fc32</opt>
</option>
<option>
<name>Complex int16</name>
<key>sc16</key>
<opt>type:sc16</opt>
</option>
<option>
<name>VITA word32</name>
<key>item32</key>
<opt>type:s32</opt>
</option>
</param>
<param>
<name>Wire Format</name>
<key>otw</key>
<value></value>
<type>string</type>
<hide>
\#if \$otw()
none
\#else
part
\#end if
</hide>
<option>
<name>Automatic</name>
<key></key>
</option>
<option>
<name>Complex int16</name>
<key>sc16</key>
</option>
<option>
<name>Complex int8</name>
<key>sc8</key>
</option>
</param>
<param>
<name>Stream args</name>
<key>stream_args</key>
<value></value>
<type>string</type>
<hide>
\#if \$stream_args()
none
\#else
part
\#end if
</hide>
<option>
<name>scalar=1024</name>
<key>scalar=1024</key>
</option>
</param>
<param>
<name>Device Addr</name>
<key>dev_addr</key>
<value></value>
<type>string</type>
<hide>
\#if \$dev_addr()
none
\#else
part
\#end if
</hide>
</param>
<param>
<name>Sync</name>
<key>sync</key>
<value></value>
<type>enum</type>
<hide>\#if \$sync() then 'none' else 'part'#</hide>
<option>
<name>unknown PPS</name>
<key>sync</key>
</option>
<option>
<name>don't sync</name>
<key></key>
</option>
</param>
<param>
<name>Clock Rate (Hz)</name>
<key>clock_rate</key>
<value>0.0</value>
<type>real</type>
<hide>\#if \$clock_rate() then 'none' else 'part'#</hide>
<option>
<name>Default</name>
<key>0.0</key>
</option>
</param>
<param>
<name>Num Mboards</name>
<key>num_mboards</key>
<value>1</value>
<type>int</type>
<hide>part</hide>
#for $m in range(1, $max_mboards+1)
<option>
<name>$(m)</name>
<key>$m</key>
</option>
#end for
</param>
#for $m in range($max_mboards)
<param>
<name>Mb$(m): Clock Source</name>
<key>clock_source$(m)</key>
<value></value>
<type>string</type>
<hide>
\#if not \$num_mboards() > $m
all
\#elif \$clock_source$(m)()
none
\#else
part
\#end if
</hide>
<option><name>Default</name><key></key></option>
<option><name>Internal</name><key>internal</key></option>
<option><name>External</name><key>external</key></option>
<option><name>MIMO Cable</name><key>mimo</key></option>
<option><name>O/B GPSDO</name><key>gpsdo</key></option>
</param>
<param>
<name>Mb$(m): Time Source</name>
<key>time_source$(m)</key>
<value></value>
<type>string</type>
<hide>
\#if not \$num_mboards() > $m
all
\#elif \$time_source$(m)()
none
\#else
part
\#end if
</hide>
<option><name>Default</name><key></key></option>
<option><name>External</name><key>external</key></option>
<option><name>MIMO Cable</name><key>mimo</key></option>
<option><name>O/B GPSDO</name><key>gpsdo</key></option>
</param>
<param>
<name>Mb$(m): Subdev Spec</name>
<key>sd_spec$(m)</key>
<value></value>
<type>string</type>
<hide>
\#if not \$num_mboards() > $m
all
\#elif \$sd_spec$(m)()
none
\#else
part
\#end if
</hide>
</param>
#end for
<param>
<name>Num Channels</name>
<key>nchan</key>
<value>1</value>
<type>int</type>
#for $n in range(1, $max_nchan+1)
<option>
<name>$(n)</name>
<key>$n</key>
</option>
#end for
</param>
<param>
<name>Samp Rate (Sps)</name>
<key>samp_rate</key>
<value>samp_rate</value>
<type>real</type>
</param>
$params
<check>$max_nchan >= \$nchan</check>
<check>\$nchan > 0</check>
<check>$max_mboards >= \$num_mboards</check>
<check>\$num_mboards > 0</check>
<check>\$nchan >= \$num_mboards</check>
<$sourk>
<name>$direction</name>
<type>\$type.type</type>
<nports>\$nchan</nports>
</$sourk>
<doc>
The UHD USRP $sourk.title() Block:
Device Address:
The device address is a delimited string used to locate UHD devices on your system. \\
If left blank, the first UHD device found will be used. \\
Use the device address to specify a specific device or list of devices.
USRP1 Example: serial=12345678
USRP2 Example: addr=192.168.10.2
USRP2 Example: addr0=192.168.10.2, addr1=192.168.10.3
$(direction.title()) Type:
This parameter controls the data type of the stream in gn | uradio.
Wire Format:
This parameter controls the form of the data | over the bus/network. \
Complex bytes may be used to trade off precision for bandwidth. \
Not all formats are supported on all devices.
Stream Args:
Optional arguments to be passed in the UHD streamer object. \
Streamer args is a list of key/value pairs; usage is determined by the implementation.
Ex: the scalar key affects the scaling between 16 and 8 bit integers in sc8 wire format.
Num Motherboards:
Selects the number of USRP motherboards in this device configuration.
Reference Source:
Where the motherboard should sync its time and clock references.
If source and sink blocks reference the same device,
it is only necessary to set the reference source on one of the blocks.
Subdevice specification:
Each motherboard should have its own subdevice specification \\
and all subdevice specifications should be the same length. \\
Select the subdevice or subdevices for each channel using a markup string. \\
The markup string consists of a list of dboard_slot:subdev_name pairs (one pair per channel). \\
If left blank, the UHD wil |
StackHut/stackhut-toolkit | stackhut_toolkit/common/runtime/rpc.py | Python | apache-2.0 | 9,386 | 0.003729 | # Copyright 2015 StackHut Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
StackHut interface and modifications to Barrister RPC library
"""
import os
import json
import uuid
import signal
from enum import Enum
import sh
from ..barrister import err_response, ERR_PARSE, ERR_INVALID_REQ, ERR_METHOD_NOT_FOUND, \
ERR_INVALID_PARAMS, ERR_INTERNAL, ERR_UNKNOWN, ERR_INVALID_RESP, \
parse, contract_from_file, RpcException
from ..utils import log
CONTRACTFILE = '.api.json'
IDLFILE = 'api.idl'
REQ_FIFO = '.req.json'
RESP_FIFO = '.resp.json'
"""
High-level interface into the IDL file
- based on the JSON compiled output that is parsed into an AST
- used from runtime introspection
"""
class ContactTypes(Enum):
    """Primitive value kinds that can appear in an IDL contract.

    NOTE(review): the name reads like a typo for ``ContractTypes``; it is
    kept as-is because renaming would break any importer.
    """
    int = 1
    string = 2
    bool = 3
    array = 4
    obj = 5
def render_signature(func):
    """Render a human-readable signature string for a parsed IDL function.

    *func* is expected to expose ``name``, ``params`` (each with ``type``,
    ``name`` and ``is_array``) and ``returns`` (same shape, or None for a
    void function).
    """
    def render_param(p):
        text = "{} {}".format(p.type, p.name)
        return '[]' + text if p.is_array else text

    params_t = ', '.join(render_param(p) for p in func.params)
    if func.returns is not None:
        return "{}({}) {}".format(func.name, params_t, render_param(func.returns))
    # FIX: the original formatted three "{}" placeholders with only two
    # arguments here, raising IndexError for every void function.
    return "{}({})".format(func.name, params_t)
def load_contract_file():
return contract_from_file(CONTRACTFILE)
def generate_contract_file():
"""
Generate the IDL -> JSON Contract file
main interface into barrister parser
"""
if not os.path.exists(IDLFILE):
raise AssertionError("Cannot find 'api.idl' interface definition file")
with open(IDLFILE, 'r') as idl_file, open(CONTRACTFILE, "w") as contract_file:
parsed = parse(idl_file, IDLFILE)
contract_file.write(json.dumps(parsed, indent=4))
##### | ###############################################################################################
# Error handling
ERR_SERVICE = -32002
class ParseError(RpcException):
def __init__(self, data=None):
super().__init__(ERR_PARSE, 'Parse Error', data)
class InvalidReqError(RpcException):
def __init__(self, data=None):
super().__init__(ERR_INVALID_REQ, 'Invalid Request', data)
class MethodNotFoundErro | r(RpcException):
def __init__(self, data=None):
super().__init__(ERR_METHOD_NOT_FOUND, 'Method Not Found', data)
class InternalError(RpcException):
def __init__(self, msg='', data=None):
super().__init__(ERR_INTERNAL, 'Internal Error - {}'.format(msg), data)
class ServiceError(RpcException):
def __init__(self, msg, data=None):
super().__init__(ERR_SERVICE, 'Service Error - {}'.format(msg), data)
class CustomError(RpcException):
def __init__(self, code, msg, data=None):
super().__init__(code, 'Error - {}'.format(msg), data)
class NonZeroExitError(RpcException):
def __init__(self, exit_code, stderr):
data = dict(exit_code=exit_code, stderr=stderr)
super().__init__(-32001, 'Sub-command returned a non-zero exit', data)
def exc_to_json_error(e, req_id=None):
return err_response(req_id, e.code, e.msg, e.data)
from enum import Enum
class SHCmds(Enum):
startup = 1
shutdown = 2
preBatch = 3
postBatch = 4
def add_get_id(d):
"""add id to json rpc if not present"""
if 'id' not in d:
d['id'] = str(uuid.uuid4())
return d['id']
class StackHutRPC:
"""
Alt. implementation of Barrister.server modified for StackHut needs
Performs
* 'Type'-checking of requests and responces per interface def
* loading the lang-specfic shim/client
* passing messages between the runner and shim/client process
"""
def __init__(self, backend, shim_cmd):
self.contract = contract_from_file(CONTRACTFILE)
self.backend = backend
# setup fifos
os.mkfifo(REQ_FIFO)
os.mkfifo(RESP_FIFO)
# run the shim
cmd = sh.Command(shim_cmd[0])
self.p = cmd(shim_cmd[1:], _bg=True, _out=lambda x: log.debug("Runner - {}".format(x.rstrip())),
_err=lambda x: log.error("Runner - {}".format(x.rstrip())))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
def handler(signum, frame):
log.error("Force-quitting RPC subprocess")
self.p.kill()
raise TimeoutError()
# Set the signal handler and a 5-second alarm
signal.signal(signal.SIGALRM, handler)
signal.alarm(5)
# send shutdown msg to each iface
for iface in self.contract.interfaces.keys():
log.debug("Send shutdown to {}".format(iface))
self._cmd_call('{}.{}'.format(iface, SHCmds.shutdown.name))
log.debug("Terminating RPC sub-process")
try:
self.p.terminate()
self.p.wait()
except sh.SignalException_15:
log.warn("RPC subprocess shutdown uncleanly")
pass
signal.alarm(0)
def _cmd_call(self, cmd):
log.debug('Sending cmd message - {}'.format(cmd))
resp = self._sub_call(cmd, [], 'shcmd')
log.debug("Cmd response - {}".format(resp))
def _req_call(self, req):
"""Make RPC call for a single request"""
req_id = None
try:
if type(req) is not dict:
raise InvalidReqError(dict(msg="%s is not an object.".format(req)))
# massage the data (if needed)
req_id = add_get_id(req)
if 'jsonrpc' not in req:
req['jsonrpc'] = "2.0"
if "method" not in req:
raise InvalidReqError(dict(msg="No method"))
# return the idl - TODO - move into Scala
if req['method'] == "common.barrister-idl" or req['method'] == "getIdl":
return self.contract.idl_parsed
# add the default interface if none exists
if req['method'].find('.') < 0:
req['method'] = "{}.{}".format('Default', req['method'])
# NOTE - would setup context and run pre/post filters here in Barrister
# Ok, - we're good to go
method = req["method"]
iface_name, func_name = method.split('.')
params = req.get('params', [])
self.contract.validate_request(iface_name, func_name, params)
result = self._sub_call(method, params, req_id)
self.contract.validate_response(iface_name, func_name, result)
resp = dict(jsonrpc="2.0", id=req_id, result=result)
except RpcException as e:
resp = exc_to_json_error(e, req_id)
except Exception as e:
_e = InternalError('Exception', dict(exception=repr(e)))
resp = exc_to_json_error(_e, req_id)
return resp
def _sub_call(self, method, params, req_id):
"""Acutal call to the shim/client subprocess"""
self.backend.create_request_dir(req_id)
# create the (sub-)req
sub_req = dict(method=method, params=params, req_id=req_id)
# blocking-wait to send the request
with open(REQ_FIFO, "w") as f:
f.write(json.dumps(sub_req))
# blocking-wait to read the resp
with open(RESP_FIFO, "r") as f:
sub_resp = json.loads(f.read())
# check the response
if 'error' in sub_resp:
error_code = sub_resp['error']
log.debug(sub_resp)
if error_code == ERR_METHOD_NOT_FOUND:
raise MethodNotFoundError()
elif error_code == ERR_INTERNAL:
raise InternalError(sub_resp['msg'], sub_resp['data'])
else:
raise CustomError(error_code, sub_resp['msg'], sub_resp['data'])
|
moloch--/RootTheBox | alembic/versions/5ca019edf61f_cascade_on_delete.py | Python | apache-2.0 | 12,722 | 0.001572 | """Cascade on Delete
Revision ID: 5ca019edf61f
Revises: 469f428604aa
Create Date: 2019-06-23 05:49:26.061932
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "5ca019edf61f"
down_revision = "469f428604aa"
branch_labels = None
depends_on = None
def upgrade():
with op.batch_alter_table("penalty") as batch_op:
batch_op.drop_constraint("penalty_ibf | k_1", type_="foreignkey")
batch_op.drop_constraint("penalty_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"penalty_ibfk_1", "penalty", "team", ["team_id"], ["id"], ondelete="CASCADE"
)
| op.create_foreign_key(
"penalty_ibfk_2", "penalty", "flag", ["flag_id"], ["id"], ondelete="CASCADE"
)
with op.batch_alter_table("snapshot_team") as batch_op:
batch_op.drop_constraint("snapshot_team_ibfk_1", type_="foreignkey")
op.create_foreign_key(
"snapshot_team_ibfk_1",
"snapshot_team",
"team",
["team_id"],
["id"],
ondelete="CASCADE",
)
with op.batch_alter_table("snapshot_to_snapshot_team") as batch_op:
batch_op.drop_constraint("snapshot_to_snapshot_team_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("snapshot_to_snapshot_team_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"snapshot_to_snapshot_team_ibfk_1",
"snapshot_to_snapshot_team",
"snapshot",
["snapshot_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"snapshot_to_snapshot_team_ibfk_2",
"snapshot_to_snapshot_team",
"snapshot_team",
["snapshot_team_id"],
["id"],
ondelete="CASCADE",
)
with op.batch_alter_table("snapshot_team_to_flag") as batch_op:
batch_op.drop_constraint("snapshot_team_to_flag_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("snapshot_team_to_flag_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"snapshot_team_to_flag_ibfk_1",
"snapshot_team_to_flag",
"snapshot_team",
["snapshot_team_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"snapshot_team_to_flag_ibfk_2",
"snapshot_team_to_flag",
"flag",
["flag_id"],
["id"],
ondelete="CASCADE",
)
with op.batch_alter_table("snapshot_team_to_game_level") as batch_op:
batch_op.drop_constraint(
"snapshot_team_to_game_level_ibfk_1", type_="foreignkey"
)
batch_op.drop_constraint(
"snapshot_team_to_game_level_ibfk_2", type_="foreignkey"
)
op.create_foreign_key(
"snapshot_team_to_game_level_ibfk_1",
"snapshot_team_to_game_level",
"snapshot_team",
["snapshot_team_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"snapshot_team_to_game_level_ibfk_2",
"snapshot_team_to_game_level",
"game_level",
["gam_level_id"],
["id"],
ondelete="CASCADE",
)
with op.batch_alter_table("team_to_box") as batch_op:
batch_op.drop_constraint("team_to_box_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("team_to_box_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"team_to_box_ibfk_1",
"team_to_box",
"team",
["team_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"team_to_box_ibfk_2",
"team_to_box",
"box",
["box_id"],
["id"],
ondelete="CASCADE",
)
with op.batch_alter_table("team_to_item") as batch_op:
batch_op.drop_constraint("team_to_item_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("team_to_item_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"team_to_item_ibfk_1",
"team_to_item",
"team",
["team_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"team_to_item_ibfk_2",
"team_to_item",
"market_item",
["item_id"],
["id"],
ondelete="CASCADE",
)
with op.batch_alter_table("team_to_source_code") as batch_op:
batch_op.drop_constraint("team_to_source_code_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("team_to_source_code_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"team_to_source_code_ibfk_1",
"team_to_source_code",
"team",
["team_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"team_to_source_code_ibfk_2",
"team_to_source_code",
"source_code",
["source_code_id"],
["id"],
ondelete="CASCADE",
)
with op.batch_alter_table("team_to_hint") as batch_op:
batch_op.drop_constraint("team_to_hint_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("team_to_hint_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"team_to_hint_ibfk_1",
"team_to_hint",
"team",
["team_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"team_to_hint_ibfk_2",
"team_to_hint",
"hint",
["hint_id"],
["id"],
ondelete="CASCADE",
)
with op.batch_alter_table("team_to_flag") as batch_op:
batch_op.drop_constraint("team_to_flag_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("team_to_flag_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"team_to_flag_ibfk_1",
"team_to_flag",
"team",
["team_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"team_to_flag_ibfk_2",
"team_to_flag",
"flag",
["flag_id"],
["id"],
ondelete="CASCADE",
)
with op.batch_alter_table("team_to_game_level") as batch_op:
batch_op.drop_constraint("team_to_game_level_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("team_to_game_level_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"team_to_game_level_ibfk_1",
"team_to_game_level",
"team",
["team_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"team_to_game_level_ibfk_2",
"team_to_game_level",
"game_level",
["game_level_id"],
["id"],
ondelete="CASCADE",
)
def downgrade():
with op.batch_alter_table("penalty") as batch_op:
batch_op.drop_constraint("penalty_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("penalty_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"penalty_ibfk_1", "penalty", "team", ["team_id"], ["id"], ondelete="RESTRICT"
)
op.create_foreign_key(
"penalty_ibfk_2", "penalty", "flag", ["flag_id"], ["id"], ondelete="RESTRICT"
)
with op.batch_alter_table("snapshot_team") as batch_op:
batch_op.drop_constraint("snapshot_team_ibfk_1", type_="foreignkey")
op.create_foreign_key(
"snapshot_team_ibfk_1",
"snapshot_team",
"team",
["team_id"],
["id"],
ondelete="RESTRICT",
)
with op.batch_alter_table("snapshot_to_snapshot_team") as batch_op:
batch_op.drop_constraint("snapshot_to_snapshot_team_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("snapshot_to_snapshot_team_ibfk_2", type_="foreignkey")
op.create_foreign_key(
"snapshot_to_snapshot_team_ibfk_1",
"snapshot_to_snapshot_team",
"snapshot",
["snapshot_id"],
["id"],
ondelete="RESTRICT",
)
op.create_foreign_key(
"snapshot_to_snapshot_team_ibfk_2",
"snapshot_to_snapshot_team",
"snapshot_team",
["snapshot_team_id"],
["id"],
ondelete="RESTRICT",
)
with op.batch_alter_table("snapshot_team_to_flag") as batch_op:
batch_op.drop_constraint("snapshot_team_to_flag_ibfk_1", type_="foreignkey")
batch_op.drop_constraint("snapshot_team_to_flag_ibfk_2", type_="foreignkey")
op.cr |
ZellMechanik-Dresden/ShapeOut | shapeout/gui/controls_statistics.py | Python | gpl-2.0 | 2,684 | 0.001863 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Shape-Out - statistics display"""
from __future__ import division, print_function, unicode_literals
import wx
import wx.grid as gridlib
from .. import util
from .controls_subpanel import SubPanel
class SubPanelStatistics(SubPanel):
def __init__(self, *args, **kwargs):
SubPanel.__init__(self, *args, **kwargs)
def _box_statistics(self, analysis):
"""
Returns a wxBoxSizer with statistics information about
each element in analysis.
"""
sizer = wx.BoxSizer(wx.VERTICAL)
if analysis is not None:
colors = []
for mm in analysis.measurements:
colors.append(mm.config["plotting"]["contour color"])
head, datalist = analysis.GetStatisticsBasic()
myGrid = gridlib.Grid(self)
myGrid.CreateGrid(len(datalist), len(head)-1)
sizer.Add(myGrid, 1, wx.EXPAND)
for ii, label in enumerate(head[1:]):
myGrid.SetColLabelValue(ii, label)
myGrid.SetColSize(ii, 10*len(label))
for jj, row, color in zip(range(len(datalist)), datalist, colors):
if analysis.GetParameters("plotting")["scatter title colored"]:
if isinstance(color, (list, tuple)):
color = util.rgb_to_hex(color[:3], norm=1)
else:
color = "black"
for ii, item in enumerate(row):
if isinstance(item, (str, unicode)):
label = u" {} ".format(item)
else:
label = u" {} ".format(util.float2string_nsf(item, n=3))
if ii is 0:
myGrid.SetRowLabelValue(jj, label)
oldsize = myGrid.GetRowLabelSize()
newsize = len(label)*10
myGrid.SetRowLabelSize(max(oldsize, newsize))
else:
myGrid.SetCellValue(jj, ii-1, label)
myGrid.SetCellTextColour(jj, ii-1, color)
myGrid.SetReadOnly(jj, ii-1)
sizer.Layout()
return sizer
def RepopulatePanel(self, analys | is):
self.ClearSubPanel()
# Create three boxes containing information
sizer = wx.BoxSizer(wx.HORIZONTAL)
statbox = self._box_statistics(analysis)
# same size
h = statbox.GetMinSize()[1]
h = max(h, 50)
statbox.SetMinSize((-1, h | ))
sizer.Add(statbox)
self.SetSizer(sizer)
sizer.Fit(self)
|
animesh21/booklogue | config.py | Python | mit | 204 | 0 | import os
basedir = os.path.abspath(os.path.dirname(__file__ | ))
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://user:user@localhost/booklogue'
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db.repository' | )
|
bdfoster/blumate | tests/util/test_location.py | Python | mit | 1,684 | 0 | """Test Home Assistant location util methods."""
# pylint: disable=too-many-public-methods
import unittest
import blumate.util.location as location_util
# Paris
COORDINATES_PARIS = (48.864716, 2.349014)
# New York
COORDINATES_NEW_YORK = (40.730610, -73.935242)
# Results for the assertion (vincenty algorithm):
# Distance [km] Distance [miles]
# [0] 5846.39 3632.78
# [1] 5851 3635
#
# [0]: http://boulter.com/gps/distance/
# [1]: https://www.wolframalpha.com/input/?i=from+paris+to+new+york
DISTANCE_KM = 5846.39
DISTANCE_MILES = 3632.78
class TestLocationUtil(unittest.TestCase):
"""Test util location methods."""
def test_get_distance(self):
"""Test getting the distance."""
meters = location_util.distance(COORDINATES_PARIS[0],
COORDINATES_PARIS[1],
COORDINATES_NEW_YORK[0],
COORDINATES_NEW_YORK[1])
self.assertAlmostEqual(meters / 1000, DISTANCE_KM, places=2)
def test_get_kilometers(self):
"""Test getting the distance between given coordinates in km."""
kilometers = location_util.vincenty(COORDINATES_PARIS,
| COORDINATES_NEW_YORK)
self.assertEqual(round(kilometers, 2), DISTANCE_KM)
def test_get_miles(self):
"""Test getting the distance between given coordinates in miles."""
miles = location_util.vincenty(COORDINA | TES_PARIS,
COORDINATES_NEW_YORK,
miles=True)
self.assertEqual(round(miles, 2), DISTANCE_MILES)
|
Sinar/popit_ng | popit/tests/test_update_es.py | Python | agpl-3.0 | 1,573 | 0.005086 | from popit.tasks import update_node
from django.test import TestCase
from mock import patch
from popit.models import *
from popit.serializers import *
class UpdateESTask(TestCase):
fixtures = ["api_request_test_data.yaml"]
@patch("popit_search.utils.search.SerializerSearch")
def test_update_node_first_index(self, mock_search):
instance = mock_search.return_value
instance.search.return_value = []
node = ("organizations", "3d62d9ea-0600-4f29-8ce6-f7720fd49aa3", "update")
update_node(node)
| organization = Organization.objects.language("en").get(id="3d62d9ea-0600-4f29-8ce6-f7720fd49aa3")
instance.add.assert_called_with(organization, OrganizationSeriali | zer)
@patch("popit_search.utils.search.SerializerSearch")
def test_delete_node(self, mock_search):
instance = mock_search.return_value
node = ("organizations", "3d62d9ea-0600-4f29-8ce6-f7720fd49aa3", "delete")
update_node(node)
instance.delete_by_id.assert_called_with("3d62d9ea-0600-4f29-8ce6-f7720fd49aa3")
@patch("popit_search.utils.search.SerializerSearch")
def test_update_node_existing_index(self, mock_search):
instance = mock_search.return_value
instance.search.return_value = [{}]
node = ("organizations", "3d62d9ea-0600-4f29-8ce6-f7720fd49aa3", "update")
update_node(node)
organization = Organization.objects.language("en").get(id="3d62d9ea-0600-4f29-8ce6-f7720fd49aa3")
instance.update.assert_called_with(organization, OrganizationSerializer) |
abztrakt/uw-skilltree | skilltreeapp/views/pages.py | Python | apache-2.0 | 4,046 | 0.003707 | from django.conf import settings
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login, logout
from django.core.context_processors import csrf
from skilltreeapp.forms import LoginForm
from django.shortcuts import render_to_response, HttpResponseRedirect
import datetime
from labgeeks_hermes.models import Notification
from labgeeks_hermes.forms import NotificationForm
# Create your views here.
def home(request):
params = {}
c = {}
c.update(csrf(request))
if request.user.is_authenticated():
# hermes code goes here
| locations = request.user.location_set.a | ll()
now = datetime.datetime.now()
notifications = Notification.objects.all()
events = []
alerts = []
for noti in notifications:
if noti.due_date:
if now.date() - noti.due_date.date() >= datetime.timedelta(days=1):
noti.archived = True
elif not noti.due_date.date() - now.date() > datetime.timedelta(days=7) and not noti.archived:
events.append(noti)
else:
if not noti.archived:
alerts.append(noti)
events.sort(key=lambda x: x.due_date)
form_is_valid = True
if request.method == 'POST':
archive_ids = request.POST.getlist('pk')
if archive_ids:
for archive_id in archive_ids:
notif = Notification.objects.get(pk=archive_id)
notif.archived = True
notif.save()
return HttpResponseRedirect('/')
form = NotificationForm(request.POST)
if form.is_valid():
form_is_valid = True
notification = form.save(commit=False)
notification.user = request.user
if notification.due_date:
if now.date() - notification.due_date.date() >= datetime.timedelta(days=1):
notification.archived = True
notification.save()
return HttpResponseRedirect('/')
else:
form_is_valid = False
else:
form = NotificationForm()
params = {
'request': request,
'events': events,
'alerts': alerts,
'form': form,
'c': c,
}
return render_to_response('pages/home.html', params, context_instance=RequestContext(request))
else:
form = LoginForm()
params = { 'form': form }
return HttpResponseRedirect('/accounts/login')
def basic(request):
params = {}
return render_to_response('pages/basic.html', params, context_instance=RequestContext(request))
def hybrid(request):
params = {}
return render_to_response('pages/hybrid.html', params, context_instance=RequestContext(request))
def tools_login(request):
""" Login a user. Called by @login_required decorator.
"""
c = {}
c.update(csrf(request))
if request.user.is_authenticated():
try:
return HttpResponseRedirect(request.GET['next'])
except:
return HttpResponseRedirect('/')
elif request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect('/')
else:
form = LoginForm()
return render_to_response('pages/login.html', locals(), context_instance=RequestContext(request))
def tools_logout(request):
""" Manually log a user out.
"""
logout(request)
return HttpResponseRedirect('/')
|
timjarman/seeds | seeds/seedsdb/tests/unit/test_models.py | Python | mit | 4,829 | 0.000828 | from django.test import TestCase
from seedsdb.models import (
Plant, Tag, Harvest, Activity
)
class TestPlant(TestCase):
def tearDown(self):
Plant.objects.all().delete()
def make_test_plant(self, aliases=None):
if aliases:
aliases = "|".join(aliases)
else:
aliases = ""
plant = Plant.objects.create(
name="Love in idleness",
description="Test description",
aliases=aliases
)
return plant
def test_unicode(self):
"The unicode method of a plant returns the expected value"
| plant = self.make_test_plant()
self.assertEqual(u"Love in idleness", unicode(plant))
def test_slug_create(self):
"Creating a new plant sets the slug as expected "
plant = self.make_test_plant()
self.assertEqual("love-in-idleness", plant.slug)
def test_slug_update(self):
"Renaming an existinmg plant updates the slug as expected "
plant = self.make_test_plant()
plant.name = 'Love lies oozing'
plant.save()
self.ass | ertEqual("love-lies-oozing", plant.slug)
def test_get_absolute_url(self):
plant = self.make_test_plant()
expected_url = "/plants/detail/love-in-idleness/"
self.assertEqual(expected_url, plant.get_absolute_url())
def test_aliases_string_none(self):
"Ensure the liases_string property works when no alias is defined"
plant = self.make_test_plant()
self.assertEqual(u"", plant.aliases_string)
def test_aliases_string_one(self):
"Ensure the aliases_string property works when one alias is defined"
plant = self.make_test_plant(aliases=["Alternative"])
self.assertEqual(u"Alternative", plant.aliases_string)
def test_aliases_string_multiple(self):
"Ensure the aliases property works when more than one alias is defined"
plant = self.make_test_plant(aliases=["Alternative", "Beta"])
self.assertEqual(u"Alternative, Beta", plant.aliases_string)
def test_aliases_search_none(self):
"Ensure the aliases_search property works when no alias is defined"
plant = self.make_test_plant()
self.assertEqual(u"", plant.aliases_search)
def test_aliases_search_one(self):
"Ensure the aliases_search property works when one alias is defined"
plant = self.make_test_plant(aliases=["Alternative"])
self.assertEqual(u"Alternative", plant.aliases_search)
def test_aliases_search_multiple(self):
"Ensure the aliases_search property works when more than one alias is defined"
plant = self.make_test_plant(aliases=["Alternative", "Beta"])
self.assertEqual(u"Alternative Beta", plant.aliases_search)
class TestTag(TestCase):
def tearDown(self):
Tag.objects.all().delete()
def test_unicode(self):
"The unicode method of a tag returns the expected value"
tag = Tag.objects.create(caption="test tag")
self.assertEqual(u"test tag", unicode(tag))
def test_tag_normalisation(self):
"A tag is normalised on save as expecgted"
tag = Tag.objects.create(caption=" VALUE ")
self.assertEqual("value", tag.caption)
class TestHarvest(TestCase):
def setUp(self):
self.test_plant = Plant.objects.create(
name="Love in idleness",
description="Test description",
)
def tearDown(self):
Harvest.objects.all().delete()
def test_unicode(self):
"The unicode method of a harvest returns the expected value"
harvest = Harvest.objects.create(season=2014, plant=self.test_plant)
self.assertEqual(u"2014 harvest of Love in idleness", unicode(harvest))
class TestActivity(TestCase):
def setUp(self):
self.test_plant = Plant.objects.create(
name="Love in idleness",
description="Test description",
)
def tearDown(self):
Activity.objects.all().delete()
def test_unicode(self):
"The unicode method of an activity returns the expected value"
activities = ['Sow', 'Plant out', 'Flowering', 'Harvest']
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
for i, activity_name in enumerate(activities):
for j, month in enumerate(months):
activity = Activity.objects.create(plant=self.test_plant,
activity=i + 1,
month=j + 1)
expected = u"{0} Love in idleness in {1}".format(activity_name, month)
self.assertEqual(expected, unicode(activity))
|
jhseu/tensorflow | tensorflow/python/keras/mixed_precision/experimental/autocast_variable.py | Python | apache-2.0 | 17,110 | 0.009936 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains AutoCastVariable, a variable which automatically casts itself."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import values as distribute_values
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
class AutoCastVariable(variables.Variable):
"""Variable that will cast itself to a different dtype in applicable contexts.
This class wraps a floating-point tf.Variable. It emulates the vari | able
interface and delegates to the wrapped variable, but it additionally will cast
the wrapped variable under a `Graph._enable_variable_auto_cast(dtype)` context
manager.
For example:
```
v = tf.Variable(1.0, dtype=tf.float32)
v = AutoCastVariable(v)
print(tf | .identity(v).dtype) # tf.float32
with ops.get_default_graph()._enable_variable_auto_cast(tf.float16):
print(tf.identity(v).dtype) # tf.float16, as v will cast itself to float16
print(v.dtype) # tf.float16, as v.dtype also changes under the ctx manager.
```
The purpose of this class is to allow Keras layers to create variables in
float32, and automatically cast them to float16 or bfloat16 when the layer is
called.
"""
def __init__(self, variable):
"""Creates an AutoCastVariable instance.
Args:
variable: A floating-point resource variable to wrap.
Raises:
ValueError: If `variable` is not a floating-point resource variable
"""
if not resource_variable_ops.is_resource_variable(variable):
raise ValueError('variable must be of type tf.ResourceVariable, but got: '
'%s' % variable)
if not variable.dtype.is_floating:
raise ValueError('variable must be a floating point variable but has '
'type: %s' % variable.dtype.name)
self._variable = variable
def _should_cast(self):
"""Returns True if this variable should be casted when accessed."""
g = ops.get_default_graph()
# pylint:disable=protected-access
return (g._auto_cast_variable_read_dtype is not None and
self.true_dtype != g._auto_cast_variable_read_dtype)
# pylint:enable=protected-access
@property
def dtype(self):
"""The dtype this variable will be casted to when read."""
if self._should_cast():
return ops.get_default_graph()._auto_cast_variable_read_dtype # pylint:disable=protected-access
else:
return self._variable.dtype
@property
def true_dtype(self):
"""The dtype of the underlying variable, before any casts are done."""
return self._variable.dtype
def value(self):
val = self._variable.value()
if not self._should_cast():
return val
return math_ops.cast(val, self.dtype)
def read_value(self):
val = self._variable.read_value()
return math_ops.cast(val, self.dtype)
def sparse_read(self, indices, name=None):
"""Reads the value of this variable sparsely, using `gather`."""
val = self._variable.sparse_read(indices, name=name)
return math_ops.cast(val, self.dtype)
def gather_nd(self, indices, name=None):
"""Gather slices of the variable into a Tensor."""
val = self._variable.gather_nd(indices, name=name)
return math_ops.cast(val, self.dtype)
def __getattr__(self, name):
return getattr(self._variable, name)
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts this variable to a tensor."""
if not self._should_cast():
return ops.convert_to_tensor(self._variable, dtype, name, as_ref)
# TODO(reedwm): Support as_ref?
assert not as_ref
if dtype is not None and not dtype.is_compatible_with(self.dtype):
raise ValueError(
'Incompatible type conversion requested to type {!r} for variable '
'of type {!r}'.format(dtype.name, self.dtype.name))
val = ops.convert_to_tensor(
self._variable, self._variable.dtype, name, as_ref=False)
return math_ops.cast(val, self.dtype)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
def __repr__(self):
if context.executing_eagerly() and not self._in_graph_mode:
repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} "
'dtype={v.dtype.name} true_dtype={v.true_dtype.name}, '
'numpy={np_repr}>')
return repr_str.format(
v=self, np_repr=ops.numpy_text(self.read_value(), is_repr=True))
else:
repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} "
'dtype={v.dtype.name} true_dtype={v.true_dtype.name}>')
return repr_str.format(v=self)
# Method delegations: We delegate the following methods to self._variable.
# Each of these methods simply calls the same method on self._variable. The
# base Variable raises NotImplementedError for most of these, so we must
# override them.
#
# We do not define the following methods from Variable for the following
# reasons:
# * 'count_up_to': This method only applies to int variables, which cannot
# be wrapped with an AutoCastVariable.
# * 'experimental_ref': Instead we inherit the definition from Variable.
# If we defined and delegated to Variable, the ref of an AutoCastVariable
# would be the same as the ref of the underlying variable, which would be
# strange as they are different Python objects.
# pylint: disable=multiple-statements
def set_shape(self, shape):
return self._variable.set_shape(self, shape)
@property
def trainable(self):
return self._variable.trainable
@property
def synchronization(self):
return self._variable.synchronization
@property
def aggregation(self):
return self._variable.aggregation
def eval(self, session=None):
return self._variable.eval(session)
def initialized_value(self):
return self._variable.initialized_value()
@property
def initial_value(self):
return self._variable.initial_value
@property
def constraint(self):
return self._variable.constraint
def assign(self, value, use_locking=None, name=None, read_value=True):
assign_op = self._variable.assign(value, use_locking, name, read_value)
return _maybe_wrap(assign_op, wrap=read_value)
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
assign_op = self._variable.assign_add(delta, use_locking, name, read_value)
return _maybe_wrap(assign_op, wrap=read_value)
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
assign_op = self._variable.assign_sub(delta, use_locking, name, read_value)
return _maybe_wrap(assign_op, wrap=read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
var = self._variable.scatter_sub(sparse_delta, use_locking, name)
return _maybe_wrap(var)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
var = self._variable.scatter_add(sparse_delta, use_locking, name)
return _maybe_wrap(var)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
var = self._variable.scatter_max(sparse_delta, use_locking, name)
return _maybe_wrap(var)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
var = self._variable.scatte |
SELO77/seloPython | 3.X/ex/asyncIOex.py | Python | mit | 5,486 | 0.004063 |
# def custom_range(end):
# i = 0
# z = "How generator works!!??"
# while i < end:
# yield i #coroutine : 진입점이 여러개인 함수
# print('custom_range.i :%s'%i)
# i += 1
# yield z
# gen = custom_range(5)
# for i in gen:
# print(i, end=',')
# list(gen)
# print(next(gen))
# def sendme():
# while 1:
# something = yield
# if something is None:
# raise StopIteration()
# print(something)
# gen = sendme()
# next(gen)
# gen.send('a')
# gen.send('b')
# gen.send(None)
# import timeit
# from urllib.request import urlopen
#
# urls = ['http://naver.com', 'https://google.com', 'https://apple.com', 'https://ubit.info', 'https://github.com/ssut']
# start = timeit.default_timer()
#
# for url in urls:
# print('Start', url)
# urlopen(url)
# print('Done', url)
#
# duration = timeit.default_timer() - start
# print(duration)
# Start http://naver.com
# Done http://naver.com
# Start https://google.com
# Done https://google.com
# Start https://apple.com
# Done https://apple.com
# Start https://ubit.info
# Done https://ubit.info
# Start https://github.com/ssut
# Done https://github.com/ssut
# 3.2820995340007357
# 위 코드의 문제점은 다음과 같습니다:
# 한 번에 한 URL밖에 받아오지 못합니다.
# 따라서 한 작업이 끝나기 전까지 다음 작업을 할 수 없습니다.
# URL이 늘어나면 늘어날수록 위 코드는 매우 비효율적입니다.
# IO 작업이 끝나기 전까지 나머지 모든 코드는 블록됩니다.
# ==========================================
# import timeit
# from concurrent.futures import ThreadPoolExecutor
# from urllib.request import urlopen
#
# urls = ['http://naver.com', 'https://google.com', 'https://apple.com', 'https://ubit.info', 'https://github.com/ssut']
#
# def fetch(url):
# print('Start', url)
# urlopen(url)
# print('Done', url)
#
# start = timeit.default_timer()
# with ThreadPoolExecutor(max_workers=5) as executor:
# for url in urls:
# executor.submit(fetch, url)
#
# duration = timeit.default_timer() - start
# print(duration)
# Start http://naver.com
# Start https://google.com
# Start https://apple.com
# Start https://ubit.info
# Start https://github.com/ssut
# Done http://naver.com
# Done https://ubit.info
# | Done https://google.com
# Done https://apple.com
# Done https://github.com/ssut
# 1.3564843910025957
# 스레드 갯수는 OS에 의해 한정되어 있습니다.
# 공유 메모리 문제가 있습 | 니다. 락(Mutex)을 사용해야 하죠.
# 그런데 파이썬에는 GIL이라는 괴상한 놈이 있습니다.
# 파이썬 스레드를 스레드답게 쓰지 못하게(?) 해주는 놈인데요. 뭐 자세한 설명은 구글링을.. (이 글에서는 aio에 대해서만 다룹니다. ㅠㅠ)
# ==========================================
# import timeit
# from concurrent.futures import ProcessPoolExecutor
# from urllib.request import urlopen
#
# urls = ['http://naver.com', 'https://google.com', 'https://apple.com', 'https://ubit.info', 'https://github.com/ssut']
#
# def fetch(url):
# print('Start', url)
# urlopen(url)
# print('Done', url)
#
# start = timeit.default_timer()
# with ProcessPoolExecutor(max_workers=5) as executor:
# for url in urls:
# executor.submit(fetch, url)
#
# duration = timeit.default_timer() - start
# print(duration)
# Start https://google.com
# Start http://naver.com
# Start https://apple.com
# Start https://ubit.info
# Start https://github.com/ssut
# Done https://ubit.info
# Done http://naver.com
# Done https://google.com
# Done https://apple.com
# Done https://github.com/ssut
# 1.5680245689982257
# 프로세스는 스레드와 비슷하지만 직접적으로 메모리를 공유하지 않습니다. (물론 일부 상황을 제외하고고 가능합니다)
# GIL도 없습니다: CPU-bound 작업에 유리합니다.
# 단 프로세스는 높은 cost를 요구합니다. 즉, 오버헤드가 큽니다. 시간을 잴 때 보면 알겠지만 프로세스 풀이 생성되고 작업이 끝나기 까지의 시간을 쟀습니다.
# 위에서 언급한 GIL을 조금 찾아보시면 아시겠지만 CPU-bound 작업에는 프로세스를 스폰하는 방법이 무조건 유리합니다. (AsyncIO도 동일합니다.)
# ==========================================
import aiohttp
import asyncio
import timeit
@asyncio.coroutine
def fetch(url):
print('Start', url)
req = yield from aiohttp.request('GET', url)
print('Done', url)
@asyncio.coroutine
def fetch_all(urls):
fetches = [asyncio.Task(fetch(url)) for url in urls]
yield from asyncio.gather(*fetches)
urls = ['http://naver.com', 'https://google.com', 'https://apple.com', 'https://ubit.info', 'https://github.com/ssut']
start = timeit.default_timer()
asyncio.get_event_loop().run_until_complete(fetch_all(urls))
duration = timeit.default_timer() - start
print(duration)
#1.4278107339996495
# asyncio에서 이벤트 루프를 끄집어오고 fetch_all 함수를 실행합니다. (코루틴 함수를 실행합니다.)
# fetch_all 함수 내에서 fetch(url) 가 실행된 결과(제너레이터)를 asyncio.Task로 래핑해줍니다. 결과, asyncio에서는 위 이미지에 보이는 이벤트 루프 큐에 이 함수 제너레이터를 삽입하고 자동으로 스케쥴링을 합니다.
# asyncio.gather 함수는 넘겨진 모든 코루틴 Task가 종료될 때까지 기다렸다가 종료되면 모든 결과를 모아서 리턴해줍니다.
|
hdm-dt-fb/rvt_model_services | commands/audit_no_ws/__init__.py | Python | mit | 979 | 0.001021 | import datetime
import rjm
from pathlib import Path
def cmd_journal(project_code, model_path, jrn_path, com_dir, log_dir):
command_path = Path(__file__).parent
command_name = command_path.name
time_stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
rvt_jrn = rjm.JournalMaker()
com_data = {
"SearchPaths": command_path,
"ModelName": model_path.name,
"OutputPath": log_dir,
"OutputPrefix": project_code,
"LogFile": log_dir / f"{time_stamp}_{command_name}_rms_exec_results.log",
"ScriptSource": command_path / "rps_detach_audit.py",
}
rvt_jrn.execute_command(
| tab_name='Add-Ins',
panel_name=' Revit Model Services (RMS) ',
command_module='RMSCmdExecutor',
command_class='RMSCmdExecutorCommand',
command_data=com_data,
)
rvt_jrn.close_model()
rvt_jrn.write_journal(jrn_path)
register = {
"name": "audit_no_ws",
"rjm": cmd_jour | nal,
}
|
UCSD-CCAL/ccal | ccal/_identify_what_to_count.py | Python | mit | 2,352 | 0.001276 | def _identify_what_to_count(signature_component_weight):
signature_component_dict = {}
signature_component_differing_dict = {}
signature_component_before_dict = {}
signature_component_before_differing_dict = {}
for signature_component, weight in signature_component_weight.items():
signature_component_before, signature_component_after = signature_component.split(
" ==> "
)
signature_component_dict[signature_component] = {
"before_sequence": signature_component_before,
"after_sequence": signature_component_after,
"n": 0,
"weight": weight,
}
signature_component_before_differing = signature_component_before[1]
signature_component_after_differing = signature_component_after[1]
k = "{} ==> {}".format(
signature_component_before_differing, signature_component_after_differing
)
if k not in signature_component_differing_dict:
signature_component_differing_dict[k] = {
"before_sequence": signature_component_before_differing,
"after_sequence": signature_component_after_differing,
"n": 0,
"weight": weight,
}
else:
signature_component_differing_dict[k]["weight"] = max(
weight, signature_component_differing_dict[k]["weight"]
)
k = signature_component_before
if k not in signature_component_before_dict:
signature_component_before_dict[k] = {"n": 0, "weight": weight}
else:
signature_component_before_dict[k]["weight"] = max(
weight, signature_component_before_dict[k]["weight"]
)
k = signature_component_before_differing
if k not in signature_component_before_differing_dict:
signature_component_before_differing_dict[k] = {"n": 0, "weight": weight}
else:
| signature_component_before_differing_dict[k]["weight"] = max(
weight, signature_component_before_differing | _dict[k]["weight"]
)
return (
signature_component_dict,
signature_component_differing_dict,
signature_component_before_dict,
signature_component_before_differing_dict,
)
|
openai/CLIP | setup.py | Python | mit | 491 | 0 | import os
import pkg_resources
from setuptools import setup, find_packages
setup(
name="clip",
py_modules=["clip"],
version="1.0",
description="",
author="OpenAI",
packages=find_packages(exclude=["tests*"]),
install_requires=[
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__ | ), "requirements.txt"))
)
],
include_package_data=True,
extras_require={'dev': ['pytest']},
) | |
ajaniv/django-core-models | django_core_models/core/views.py | Python | mit | 2,384 | 0 | """
.. module:: django_core_models.core.views
:synopsis: django_core_models core application views module.
*django_core_models* core application views module.
"""
from __future__ import absolute_import
from rest_framework.decorators import api_view
from django_core_utils.views import (instance_list, instance_detail,
ObjectListView, ObjectDetailView)
from . import models
from . import serializers
@api_view(['GET', 'POST'])
def category_list(request, content_format=None):
"""
List all categories, or cre | ate a new category instance.
"""
return instance_list(request, models.Category,
serializers.CategorySerializer,
format)
@api_view(['GET', 'PUT', 'DELETE'])
def category_detail(request, pk, content_format=None):
"""
Retrieve, up | date or delete a category.
"""
return instance_detail(request, pk, models.Category,
serializers.CategorySerializer,
format)
class AnnotationMixin(object):
"""Annotation mixin class."""
queryset = models.Annotation.objects.all()
serializer_class = serializers.AnnotationSerializer
class AnnotationList(AnnotationMixin, ObjectListView):
"""Class to list all annotations, or create a new annotation instance."""
pass
class AnnotationDetail(AnnotationMixin, ObjectDetailView):
"""
Class to retrieve, update or delete annotation instance.
"""
pass
class CategoryMixin(object):
"""Category mixin class."""
queryset = models.Category.objects.all()
serializer_class = serializers.CategorySerializer
class CategoryList(CategoryMixin, ObjectListView):
"""Class to list all currencies, or create a new category instance."""
pass
class CategoryDetail(CategoryMixin, ObjectDetailView):
"""
Class to retrieve, update or delete category instance.
"""
pass
class CurrencyMixin(object):
"""Currency mixin class."""
queryset = models.Currency.objects.all()
serializer_class = serializers.CurrencySerializer
class CurrencyList(CurrencyMixin, ObjectListView):
"""Class to list all currencies, or create a new currency instance."""
pass
class CurrencyDetail(CurrencyMixin, ObjectDetailView):
"""
Class to retrieve, update or delete currency instance.
"""
pass
|
bqbn/addons-server | src/olympia/addons/migrations/0001_initial.py | Python | bsd-3-clause | 26,975 | 0.004708 | # Generated by Django 2.2.5 on 2019-09-12 13:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import django.utils.timezone
import django_extensions.db.fields.json
import olympia.amo.fields
import olympia.amo.models
import olympia.translations.fields
import olympia.users.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('translations', '__first__'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Addon',
fields=[
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
('guid', models.CharField(max_length=255, null=True, unique=True)),
('slug', models.CharField(max_length=30, null=True, unique=True)),
('default_locale', models.CharField(db_column='defaultlocale', default='en-US', max_length=10)),
('type', models.PositiveIntegerField(choices=[(1, 'Extension'), (2, 'Complete Theme'), (3, 'Dictionary'), (4, 'Search Engine'), (5, 'Language Pack (Application)'), (6, 'Language Pack (Add-on)'), (7, 'Plugin'), (9, 'Deprecated LWT'), (10, 'Theme (Static)')], db_column='addontype_id', default=1)),
('status', models.PositiveIntegerField(choices=[(0, 'Incomplete'), (3, 'Awaiting Review'), (4, 'Approved'), (5, 'Disabled by Mozilla'), (11, 'Deleted')], default=0)),
('icon_type', models.CharField(blank=True, db_column='icontype', max_length=25)),
('icon_hash', models.CharField(blank=True, max_length=8, null=True)),
('average_rating', models.FloatField(db_column='averagerating', default=0, max_length=255, null=True)),
('bayesian_rating', models.FloatField(db_column='bayesianrating', default=0)),
('total_ratings', models.PositiveIntegerField(db_column='totalreviews', default=0)),
('text_ratings_count', models.PositiveIntegerField(db_column='textreviewscount', default=0)),
('weekly_downloads', models.PositiveIntegerField(db_column='weeklydownloads', default=0)),
('total_downloads', models.PositiveIntegerField(db_column='totaldownloads', default=0)),
('hotness', models.FloatField(default=0)),
('average_daily_users', models.PositiveIntegerField(default=0)),
('last_updated', models.DateTimeField(help_text='Last time this add-on had a file/version update', null=True)),
('disabled_by_user', models.BooleanField(db_column='inactive', default=False)),
('view_source', models.BooleanField(db_column='viewsource', default=True)),
('public_stats', models.BooleanField(db_column='publicstats', default=False)),
('target_locale', models.CharField(blank=True, help_text='For dictionaries and language packs. Identifies the language and, optionally, region that this add-on is written for. Examples: en-US, fr, and de-AT', max_length=255, null=True)),
('contributions', models.URLField(blank=True, max_length=255)),
('is_experimental', models.BooleanField(db_column='experimental', default=False)),
('reputation', models.SmallIntegerField(choices=[(0, 'No Reputation'), (1, 'Good (1)'), (2, 'Very Good (2)'), (3, 'Excellent (3)')], default=0, help_text='The higher the reputation value, the further down the add-on will be in the auto-approved review queue. A value of 0 has no impact', null=True)),
('requires_payment', models.BooleanField(default=False)),
],
options={
'db_table': 'addons',
'base_manager_name': 'unfiltered',
},
bases=(olympia.amo.models.OnChangeMixin, olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
managers=[
('unfiltered', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='AddonCategory',
fields=[
('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
('feature', models.BooleanField(default=False)),
('feature_locales', models.CharField(default='', max_length=255, null=True)),
('addon', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='addons.Addon')),
],
options={
'db_table': 'addons_categories',
},
),
migrations.CreateModel(
name='DeniedGuid',
fields=[
( | 'created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
('guid', model | s.CharField(max_length=255, unique=True)),
('comments', models.TextField(blank=True, default='')),
],
options={
'db_table': 'denied_guids',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='DeniedSlug',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(default='', max_length=255, unique=True)),
],
options={
'db_table': 'addons_denied_slug',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='ReplacementAddon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('guid', models.CharField(max_length=255, null=True, unique=True)),
('path', models.CharField(help_text='Addon and collection paths need to end with "/"', max_length=255, null=True)),
],
options={
'db_table': 'replacement_addons',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='AddonApprovalsCounter',
fields=[
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('addon', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='addons.Addon')),
('counter', models.PositiveIntegerField(default=0)),
('last_human_review', models.DateTimeField(null=True)),
('last_content_review', models.DateTimeField(null=True)),
],
options={
'get_latest_by': 'created',
'abstract': False,
'base_manager_name': 'objects',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='AddonReviewerFlags',
fields=[
('created', models.DateTimeField(blank=True, default |
Naftoreiclag/Genre-Entities | tool/SyncEngineCodelite.py | Python | apache-2.0 | 2,659 | 0.002633 | # Copyright 2017 James Fong
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
import os
from Common import get_project_name
proj_name = get_project_name()
from Common import indexFiles
sourceList, _, __ = \
indexFiles('../src/' + proj_name + '/', ['.cpp', '.hpp'], ['deprecated/'])
projectFilename = '../ide/Codelite/Codelite.project'
sourceRootRelativePath = '../../src/' + proj_name + '/'
projectEtree = ET.parse(projectFilename)
projectRoot = projectEtree.getroot()
virtualDirs = projectRoot.findall('VirtualDirectory')
myprojectVirtualDir = None
for virtualDir in virtualDirs:
if virtualDir.get('Name') == proj_name:
myprojectVirtualDir = virtualDir
break
if not myprojectVirtualDir:
myprojectVirtualDir = ET.SubElement(projectRoot, 'VirtualDirectory')
myprojectVirtualDir.clear()
myprojectVirtualDir.set('Name', proj_name)
for sourcePath in sourceList:
def decomposePath(path):
'' | '
Converts a path into a list of directory names leading up to the tail
(filename) and the filename.
'''
path, filename = os.path.split(path)
decomposed = []
| while True:
head, tail = os.path.split(path)
if tail:
decomposed.append(tail)
path = head
else:
break
decomposed.reverse()
return decomposed
path = decomposePath(sourcePath)
parentElem = myprojectVirtualDir
for dirName in path:
parentChildren = parentElem.findall('VirtualDirectory')
foundChild = None
for childElem in parentChildren:
if childElem.get('Name') == dirName:
foundChild = childElem
break
if foundChild:
parentElem = foundChild
else:
parentElem = ET.SubElement(parentElem, 'VirtualDirectory')
parentElem.set('Name', dirName)
sourceElem = ET.SubElement(parentElem, 'File')
sourceElem.set('Name', \
os.path.join(sourceRootRelativePath, sourcePath))
projectEtree.write(projectFilename)
|
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_application_gateways_operations.py | Python | mit | 73,707 | 0.005088 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewaysOperations(object):
"""ApplicationGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscr | iptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_met | hod = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationGateway"
"""Gets the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Applicat |
the-it/WS_THEbotIT | archive/online/2015/150914_add_pages_Saint_Germain.py | Python | mit | 3,440 | 0.006412 | # -*- coding: utf-8 -*-
__author__ = 'eso'
import sys
sys.path.append('../../')
from tools.petscan import PetScan
import re
import requests
import pywikibot
list_of_pages =[1134,
1135,
1136,
1137,
1138,
1139,
1140,
1141,
1142,
1143,
1144,
1145,
1146,
1147,
1148,
1149,
1150,
1151,
1152,
1153,
1154,
1155,
1156,
1157,
1158,
| 1159,
| 1160,
1161,
1162,
1163,
1164,
1165,
1166,
1167,
1168,
1169,
1170,
1171,
1172,
1173,
1174,
1175,
1176,
1177,
1178,
1179,
1180,
1181,
1182,
1183,
1184,
1185,
1186,
1187,
1188,
1189,
1190,
1191,
1192,
1191,
1194,
1195,
1196,
1197,
1198,
1199,
1200,
1201,
1203,
1205,
1207,
1209,
1211,
1213,
1215,
1217,
1219,
1221,
1223,
1225,
1227,
1229,
1231,
1233,
1234,
1235,
1236,
1237,
1238,
1239,
1240,
1241,
1243,
1244]
header = '''<noinclude><pagequality level="1" user="THEbotIT" /><div class="pagetext">{{Seitenstatus2||[[Staatsvertrag von Saint-Germain-en-Laye]]. In: Staatsgesetzblatt für die Republik Österreich. Jahrgang 1920, S. 995–1244|Staatsvertrag von Saint-Germain-en-Laye|}}{{BlockSatzStart}}
</noinclude>'''
footer = '''<noinclude>{{BlockSatzEnd}}{{Zitierempfehlung|Projekt=: ''[[Staatsvertrag von Saint-Germain-en-Laye]]. In: Staatsgesetzblatt für die Republik Österreich. Jahrgang 1920, S. 995–1244''. Österreichische Staatsdruckerei, Wien 1920|Seite=%s}}</div></noinclude>'''
with open('saintgermain.txt', mode='r', encoding='utf8') as rawfile:
text = re.split('\{\{Seite\|\d{3,4}\|\|Staatsgesetzblatt_\(Austria\)_1920_\d{4}\.jpg\}\}', rawfile.read())
site = pywikibot.Site()
for idx, i in enumerate(list_of_pages):
if i == 995:
continue
if i < 1000:
lemma = 'Seite:Staatsgesetzblatt (Austria) 1920 0{}.jpg'.format(i)
else:
lemma = 'Seite:Staatsgesetzblatt (Austria) 1920 {}.jpg'.format(i)
page = pywikibot.Page(site, lemma)
page.text = header + text[idx] + (footer % i)
page.save(summary='Automatische Konvertierung von PR1 zu PR2', botflag=True) |
virantha/scanpdf | docs/conf.py | Python | apache-2.0 | 10,696 | 0.006638 | # -*- coding: utf-8 -*-
#
# scanpdf documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 23 13:43:29 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Scan PDF'
copyright = u'2014, Virantha N. Ekanayake'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
try:
release = pkg_resources.get_distribution('scanpdf').version
except pkg_resources.DistributionNotFound:
print 'To build the documentation, The distribution information of scanpdf'
print 'Has to be available. Either install the package into your'
print 'development environment or run "setup.py develop" to setup the'
print 'metadata. A virtualenv is recommended!'
sys.exit(1)
del pkg_resources
version = '.'.join(release.split('.')[:2])
# The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', | a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {} |
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'scanpdfdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'scanpdf.tex', u'Scan PDF Documentation',
u'Virantha N. Ekanayake', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scanpdf', u'Scan PDF Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_d |
LLNL/spack | lib/spack/spack/util/log_parse.py | Python | lgpl-2.1 | 3,781 | 0 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import sys
from ctest_log_parser import BuildError, BuildWarning, CTestLogParser
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
__all__ = ['parse_log_events', 'make_log_context']
def parse_log_events(stream, contex | t=6, jobs=None, profile=False):
"""Extract interesting events from a log file as a list of LogEvent.
Args:
stream (str or typing.IO): build log name or file object
context (int): lines of context to extract around each log event
job | s (int): number of jobs to parse with; default ncpus
profile (bool): print out profile information for parsing
Returns:
(tuple): two lists containig ``BuildError`` and
``BuildWarning`` objects.
This is a wrapper around ``ctest_log_parser.CTestLogParser`` that
lazily constructs a single ``CTestLogParser`` object. This ensures
that all the regex compilation is only done once.
"""
if parse_log_events.ctest_parser is None:
parse_log_events.ctest_parser = CTestLogParser(profile=profile)
result = parse_log_events.ctest_parser.parse(stream, context, jobs)
if profile:
parse_log_events.ctest_parser.print_timings()
return result
#: lazily constructed CTest log parser
parse_log_events.ctest_parser = None # type: ignore[attr-defined]
def _wrap(text, width):
"""Break text into lines of specific width."""
lines = []
pos = 0
while pos < len(text):
lines.append(text[pos:pos + width])
pos += width
return lines
def make_log_context(log_events, width=None):
    """Get error context from a log file.

    Args:
        log_events (list): list of events created by
            ``ctest_log_parser.parse()``
        width (int or None): wrap width; ``0`` for no limit; ``None`` to
            auto-size for terminal

    Returns:
        str: context from the build log with errors highlighted

    Parses the log file for lines containing errors, and prints them out
    with line numbers and context.  Errors are highlighted with '>>' and
    with red highlighting (if color is enabled).

    Events are sorted by line number before they are displayed.
    """
    error_lines = set(e.line_no for e in log_events)
    log_events = sorted(log_events, key=lambda e: e.line_no)
    # Gutter width: widest error line number plus padding for the '>>' marker.
    num_width = len(str(max(error_lines or [0]))) + 4
    line_fmt = '%%-%dd%%s' % num_width
    indent = ' ' * (5 + num_width)
    if width is None:
        _, width = tty.terminal_size()
    if width <= 0:
        # non-positive width means "no limit"
        width = sys.maxsize
    wrap_width = width - num_width - 6
    out = StringIO()
    next_line = 1
    for event in log_events:
        start = event.start
        # Highlight color depends on event severity.
        if isinstance(event, BuildError):
            color = 'R'
        elif isinstance(event, BuildWarning):
            color = 'Y'
        else:
            color = 'W'
        # Elide the gap between non-adjacent events.
        if next_line != 1 and start > next_line:
            out.write('\n ...\n\n')
        # Overlapping context regions: resume where the previous event ended.
        if start < next_line:
            start = next_line
        for i in range(start, event.end):
            # wrap to width
            lines = _wrap(event[i], wrap_width)
            # continuation lines are indented to line up under the text column
            lines[1:] = [indent + ln for ln in lines[1:]]
            wrapped_line = line_fmt % (i, '\n'.join(lines))
            if i in error_lines:
                out.write(colorize(
                    ' @%s{>> %s}\n' % (color, cescape(wrapped_line))))
            else:
                out.write(' %s\n' % wrapped_line)
        next_line = event.end
    return out.getvalue()
|
clic-lab/blocks | BlockWorldRoboticAgent/agent_no_model.py | Python | gpl-3.0 | 5,392 | 0.002411 | import sys
import logger
import message_protocol_util as mpu
import reliable_connect as rc
import generic_policy as gp
import random
# Baseline agent types: ORACLE replays the gold trajectory, RANDOM_WALK samples
# uniformly random actions, STOP always emits the stop action.
ORACLE, RANDOM_WALK, STOP = range(3)
class AgentModelLess:
    """The block world agent implementing the oracle, random-walk and stop
    baselines.  Requires no parameters to be tuned.  This is implemented in a
    separate file to remove dependencies on tensorflow, allowing it to run on
    systems without those dependencies. """

    def __init__(self, agent_type, config, constants):
        # agent_type: one of ORACLE, RANDOM_WALK, STOP (module-level constants).
        # Initialize logger
        logger.Log.open("./log.txt")
        self.config = config
        # Connect to simulator; IP and port may be given on the command line.
        if len(sys.argv) < 2:
            logger.Log.info("IP not given. Using localhost i.e. 0.0.0.0")
            self.unity_ip = "0.0.0.0"
        else:
            self.unity_ip = sys.argv[1]
        if len(sys.argv) < 3:
            logger.Log.info("PORT not given. Using 11000")
            self.PORT = 11000
        else:
            self.PORT = int(sys.argv[2])
        self.agent_type = agent_type
        # Size of image
        image_dim = self.config.screen_size
        self.connection = rc.ReliableConnect(self.unity_ip, self.PORT, image_dim)
        self.connection.connect()
        # Dataset specific parameters
        self.num_block = 20
        self.num_direction = 4
        use_stop = True
        if use_stop:
            self.num_actions = self.num_block * self.num_direction + 1  # 1 for stopping
        else:
            self.num_actions = self.num_block * self.num_direction
        # Create toolkit of message protocol between simulator and agent
        self.message_protocol_kit = mpu.MessageProtocolUtil(self.num_direction, self.num_actions, use_stop)
        # Test policy
        self.test_policy = gp.GenericPolicy.get_argmax_action
        # MDP details: gamma of 1.0 means undiscounted returns.
        self.gamma = 1.0
        self.config.log_flag()
        logger.Log.info("Created Agent.")

    def receive_instruction_and_image(self):
        """ Receives image and then reset message. Returns decoded
        message with image. """
        img = self.connection.receive_image()
        response = self.connection.receive_message()
        (status_code, bisk_metric, _, instruction, trajectory) = \
            self.message_protocol_kit.decode_reset_message(response)
        return status_code, bisk_metric, img, instruction, trajectory

    def receive_response_and_image(self):
        """ Receives image and then response message. Returns decoded
        message with image. """
        img = self.connection.receive_image()
        response = self.connection.receive_message()
        (status_code, reward, _, reset_file_name) = self.message_protocol_kit.decode_message(response)
        return status_code, reward, img, reset_file_name

    def test(self, dataset_size):
        """ Runs the configured baseline (oracle/random-walk/stop) on the
        dataset and returns the average Bisk metric. """
        sum_bisk_metric = 0
        for i in range(0, dataset_size):
            (_, bisk_metric, current_env, instruction, trajectory) = self.receive_instruction_and_image()
            sum_bisk_metric = sum_bisk_metric + bisk_metric
            logger.Log.info("Bisk Metric " + str(bisk_metric))
            logger.Log.info("Instruction: " + str(instruction))
            steps = 0
            sample_expected_reward = 0
            running_gamma = 1.0
            while True:
                # sample action from the likelihood distribution
                if self.agent_type == ORACLE:
                    action_id = trajectory[steps]
                elif self.agent_type == RANDOM_WALK:
                    # action ids 0..79 are block moves; 80 is stop
                    action_id = random.randint(0, 80)
                elif self.agent_type == STOP:
                    # 80 == num_block * num_direction, i.e. the stop action
                    action_id = 80
                else:
                    raise AssertionError("Unknown agent type. Found " + str(self.agent_type))
                action_str = self.message_protocol_kit.encode_action(action_id)
                print "Sending Message: " + action_str
                logger.Log.info(action_str + "\n")
                self.connection.send_message(action_str)
                # receive confirmation on the completion of action
                (_, reward, _, is_reset) = self.receive_response_and_image()
                print "Received reward " + str(reward)
                # Update and print metric (discounted return accumulator)
                sample_expected_reward += running_gamma * reward
                running_gamma *= self.gamma
                steps += 1
                # Reset to a new task
                if self.message_protocol_kit.is_reset_message(is_reset):
                    print "Resetting the episode"
                    self.connection.send_message("Ok-Reset")
                    logger.Log.info("Example: " + str(i) + " Instruction: " + instruction + " Steps: " + str(steps))
                    logger.Log.info("\t Total expected reward: " + str(sample_expected_reward))
                    logger.Log.info("\t Avg. expected reward: " + str(sample_expected_reward/float(steps)))
                    logger.Log.info("\n============================================")
                    logger.Log.flush()
                    break
        avg_bisk_metric = sum_bisk_metric/float(dataset_size)
        logger.Log.info("Avg. Bisk Metric " + str(avg_bisk_metric))
        logger.Log.info("Testing finished.")
        logger.Log.flush()
        return avg_bisk_metric
|
chromium/chromium | third_party/android_deps/libs/org_robolectric_shadows_multidex/3pp/fetch.py | Python | bsd-3-clause | 2,494 | 0 | #!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
import argparse
import json
import os
import re
import urllib.request
# Maven repository coordinates for org.robolectric:shadows-multidex.
_REPO_URL = 'https://repo.maven.apache.org/maven2'
_GROUP_NAME = 'org/robolectric'
_MODULE_NAME = 'shadows-multidex'
_FILE_EXT = 'jar'
# When set, skip the maven-metadata.xml lookup and report this version.
_OVERRIDE_LATEST = None
# Chromium-local patch suffix appended to the upstream version ("<ver>.cr1").
_PATCH_VERSION = 'cr1'
def do_latest():
    """Print the latest upstream version, suffixed with the local patch version."""
    if _OVERRIDE_LATEST is not None:
        print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
        return

    metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    metadata = urllib.request.urlopen(metadata_url).read().decode(
        'utf-8')
    # Do not parse xml with the python included parser since it is susceptible
    # to maliciously crafted xmls. Only use regular expression parsing to be
    # safe. RE should be enough to handle what we need to extract.
    latest_match = re.search('<latest>([^<]+)</latest>', metadata)
    if latest_match:
        version = latest_match.group(1)
    else:
        # No <latest> tag: hope the <version> entries are sorted and take the
        # last one, which is commonly the newest.
        version = re.findall('<version>([^<]+)</version>', metadata)[-1]
    print(version + f'.{_PATCH_VERSION}')
def get_download_url(version):
    """Print a 3pp partial manifest (JSON) with the artifact's download URL."""
    # Strip the Chromium patch suffix ("<upstream>.crN") before building the
    # upstream URL.
    upstream, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = upstream

    file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
                                                    _MODULE_NAME, version,
                                                    _FILE_EXT)
    partial_manifest = {
        'url': [file_url],
        'name': [file_url.rsplit('/', 1)[-1]],
        'ext': '.' + _FILE_EXT,
    }
    print(json.dumps(partial_manifest))
def main():
    """Dispatch the 3pp subcommand (``latest`` or ``get_url``) from argv."""
    parser = argparse.ArgumentParser()
    subcommands = parser.add_subparsers()

    latest_cmd = subcommands.add_parser('latest')
    latest_cmd.set_defaults(func=lambda _opts: do_latest())

    get_url_cmd = subcommands.add_parser('get_url')
    get_url_cmd.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    opts = parser.parse_args()
    opts.func(opts)


if __name__ == '__main__':
    main()
|
RadoRado/EuroPython2017 | run_python_run/repl/migrations/0001_initial.py | Python | mit | 665 | 0.001504 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-06 20:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``CodeRun`` table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='CodeRun',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # id of the submitted run and its latest status string
                ('run_id', models.PositiveIntegerField()),
                ('run_status', models.CharField(max_length=255)),
                # captured program output
                ('output', models.TextField()),
            ],
        ),
    ]
|
lbenning/Evolutionary | Symbolic Regression/main.py | Python | mit | 2,227 | 0.042209 | #! /usr/bin/python
from gp import simulate
from gp import randomSearch
import multiprocessing as mp
from plot import graph
import sys
'''
Main script for execution of genetic program for
symbolic regression.
Written by Luke Benning
Last Edited : 1/16/2015
'''
# Retrieve 2d point data from file
def getData(fileName):
    """Parse a file of 2d points, one whitespace-separated "x y" pair per line.

    Returns a list of (float, float) tuples.  Lines with fewer than two
    fields (e.g. blank lines) are skipped.
    """
    points = []
    # 'with' guarantees the handle is closed; the original leaked it and also
    # shadowed the handle variable with the per-line token list.
    with open(fileName, 'r') as handle:
        for line in handle:
            fields = line.split()
            if len(fields) < 2:
                continue  # skip blank/malformed lines instead of crashing
            points.append((float(fields[0]), float(fields[1])))
    return points
# Retrieve first elements in tuples
def selectFirst(k):
    """Return the first element of every tuple in *k* (idiomatic comprehension)."""
    return [t[0] for t in k]
# Finds best equation among the simulation results
def findBestEquation(tradGeneticRes, randomRes):
    """Return the equation whose final score is highest across both result sets.

    Each entry is a (score_series, equation) pair; the last score in the
    series is the one compared.  Returns the string "None" for empty input.
    """
    best_score = -1.0
    best_eqn = "None"
    for group in (tradGeneticRes, randomRes):
        for entry in group:
            final_score = entry[0][-1]
            if final_score > best_score:
                best_score = final_score
                best_eqn = entry[1]
    return best_eqn
# Initialize full genetic program simulation
def main(filename, iterations, eva, count, processCt):
    """Run GP and random-search simulations in parallel, then plot results."""
    print "Retrieving dataset..."
    data = getData(filename)
    # Evaluation checkpoints at which fitness is recorded: 1, then `count`
    # evenly spaced points up to `eva`.
    intervals = []
    intervals.append(1)
    for x in range(1,count+1):
        intervals.append(x*(eva/count))  # NOTE(review): integer division under Python 2 — confirm intentional
    processPool = mp.Pool(processes=processCt)
    print "Beginning simulations..."
    # Launch `iterations` independent runs of each search strategy.
    geneticJobs = [processPool.apply_async(simulate,
        args=(data,eva,intervals)) for x in range (iterations)]
    randomJobs = [processPool.apply_async(randomSearch,
        args=(data,eva,intervals)) for x in range (iterations)]
    # Block until every worker finishes.
    res = [g.get() for g in geneticJobs]
    randRes = [r.get() for r in randomJobs]
    print "Simulation Complete"
    print "Best Equation Found: "
    print findBestEquation(res,randRes)
    graph(selectFirst(res),selectFirst(randRes),intervals)
'''
arg1 : Filename to read in list of 2d points
arg2 : Number of iterations n > 0
arg3 : Number of evaluations m > 0
arg4 : Number of interval points to plot i > 0 (in addition to 1)
arg5 : Maximum number of processes for multiprocessing pool
'''
if __name__ == '__main__':
    main(sys.argv[1],
        int(sys.argv[2]),
        int(sys.argv[3]),
        int(sys.argv[4]),
        int(sys.argv[5]))
stephanehenry27/Sickbeard-anime | sickbeard/scheduler.py | Python | gpl-3.0 | 2,593 | 0.002314 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more | details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import dateti | me
import time
import threading
import traceback
from sickbeard import logger
from sickbeard.exceptions import ex
class Scheduler:
    """Runs an action object's ``run()`` periodically on a background thread."""

    def __init__(self, action, cycleTime=datetime.timedelta(minutes=10), runImmediately=True, threadName="ScheduledThread", silent=False):
        # Backdating lastRun makes the first cycle fire immediately.
        if runImmediately:
            self.lastRun = datetime.datetime.fromordinal(1)
        else:
            self.lastRun = datetime.datetime.now()
        self.action = action  # object exposing run() and amActive
        self.cycleTime = cycleTime  # interval between runs
        self.thread = None
        self.threadName = threadName
        self.silent = silent  # suppress the per-cycle debug log line
        self.initThread()
        self.abort = False  # set True to stop runAction()'s loop

    def initThread(self):
        """(Re)create the worker thread if it is missing or dead."""
        if self.thread == None or not self.thread.isAlive():
            self.thread = threading.Thread(None, self.runAction, self.threadName)

    def timeLeft(self):
        """Return the timedelta remaining until the next scheduled run."""
        return self.cycleTime - (datetime.datetime.now() - self.lastRun)

    def forceRun(self):
        """Schedule an immediate run; return False if the action is mid-run."""
        if not self.action.amActive:
            self.lastRun = datetime.datetime.fromordinal(1)
            return True
        return False

    def runAction(self):
        """Worker loop: fire the action once per cycleTime until abort is set."""
        while True:
            currentTime = datetime.datetime.now()
            if currentTime - self.lastRun > self.cycleTime:
                self.lastRun = currentTime
                try:
                    if not self.silent:
                        logger.log(u"Starting new thread: "+self.threadName, logger.DEBUG)
                    self.action.run()
                except Exception, e:
                    # Log and keep looping: a failed cycle must not kill the scheduler.
                    logger.log(u"Exception generated in thread "+self.threadName+": " + ex(e), logger.ERROR)
                    logger.log(traceback.format_exc(), logger.DEBUG)
            if self.abort:
                self.abort = False
                self.thread = None
                return
            time.sleep(1)
|
kxgames/kingdoms-of-life | seacow.py | Python | gpl-2.0 | 5,188 | 0.001542 | #!/usr/bin/env python
import os, kxg, pyglet
import tokens, gui, messages
class SandboxLoop (kxg.Loop):
    """Single-player sandbox loop: one local GUI actor driven by a referee."""

    def __init__(self):
        world = tokens.World()
        referee = tokens.Referee()
        window = pyglet.window.Window()
        actor = gui.Gui(window)
        actor.setup_pregame()
        # The lone player is created via the same greeting message the
        # networked game uses.
        actors_to_greetings = {
                actor: messages.CreatePlayer("Sandbox", 'orange')}
        game_stage = kxg.SinglePlayerGameStage(
                world, referee, actors_to_greetings)
        game_stage.successor = PostgameSplashStage(world, actor)
        kxg.Loop.__init__(self, game_stage)
class ServerLoop (kxg.Loop):
    """Headless server loop: starts by waiting for clients to connect."""

    def __init__(self, host, port):
        stage = ServerConnectionStage(host, port)
        kxg.Loop.__init__(self, stage)
class ClientLoop (kxg.GuiLoop):
    """GUI client loop: starts by connecting to the given server."""

    def __init__(self, name, host, port):
        stage = ClientConnectionStage(name, host, port)
        kxg.GuiLoop.__init__(self, stage)
class ServerConnectionStage (kxg.Stage):
    """Pregame server stage: accept two clients and collect their greetings."""

    def __init__(self, host, port):
        kxg.Stage.__init__(self)
        self.pipes = []
        self.greetings = []
        self.successor = None
        self.host, self.port = host, port
        # One color per expected player.
        self.colors = 'orange', 'purple', 'green'
        self.server = kxg.network.Server(
                host, port, 2, self.clients_connected)

    def setup(self):
        print("Seacow server running: {}:{}".format(self.host, self.port))
        self.server.open()

    def update(self, time):
        if not self.server.finished():
            self.server.accept()
        else:
            # Wait until every connected client has sent its WelcomeClient
            # message before moving on to the game stage.
            pending_greetings = False
            for greeting in self.greetings:
                finished = greeting.update()
                if not finished: pending_greetings = True
            if not pending_greetings:
                self.exit_stage()

    def clients_connected(self, pipes):
        # Server callback: start listening for a greeting on each new pipe.
        for pipe in pipes:
            greeting = kxg.messaging.SimpleReceive(
                    pipe, messages.WelcomeClient)
            greeting.start()
            self.pipes.append(pipe)
            self.greetings.append(greeting)

    def teardown(self):
        print("Clients connected. Game starting.")
        world, referee = tokens.World(), tokens.Referee()
        pipes_to_messages = {}
        # Pair each client with a player color, keyed by its network pipe.
        for greeting, color in zip(self.greetings, self.colors):
            pipe = greeting.get_pipe()
            name = greeting.get_message().name
            pipes_to_messages[pipe] = messages.CreatePlayer(name, color)
        self.successor = kxg.MultiplayerServerGameStage(
                world, referee, pipes_to_messages)
        # Chain another connection stage on the next port so a rematch can
        # start after this game ends.
        self.successor.successor = ServerConnectionStage(
                self.host, self.port + 1)

    def get_successor(self):
        return self.successor
cla | ss ClientConnectionStage (kxg.Stage):
def __init__(self, name, host, port):
kxg.Stage.__init__(self)
self.name = name
self.host = host
self.port = port
self.update = self.update_connection
self.client = kxg.network.Client(
host, port, callback=self.connection_established)
self.pipe = None
self.conversation = None
self.successor = None
def setup(self):
print("Seacow client running: {}:{} ({})".format(
self.host, self.port, self.name))
window = self.get_master().get_window()
self.gui = gui.Gui(window)
self.gui.setup_pregame()
def update_connection(self, time):
self.client.connect()
def connection_established(self, pipe):
message = messages.WelcomeClient(self.name)
self.conversation = kxg.messaging.SimpleSend(pipe, message)
self.conversation.start()
self.pipe = pipe
self.update = self.update_introduction
def update_introduction(self, time):
self.conversation.update()
if self.conversation.finished():
self.exit_stage()
def teardown(self):
world = tokens.World()
actor = self.gui
pipe = self.pipe
game_stage = kxg.MultiplayerClientGameStage(world, actor, pipe)
postgame_stage = PostgameSplashStage(
world, actor, self.name, self.host, self.port)
self.successor = game_stage
game_stage.successor = postgame_stage
def get_successor(self):
return self.successor
class PostgameSplashStage (kxg.Stage):
    """Postgame stage: show the splash screen and optionally queue a rematch."""

    def __init__(self, world, gui, name='', host='', port=0):
        kxg.Stage.__init__(self)
        self.world = world
        self.gui = gui
        # Connection details are kept so a rematch can reconnect; they are
        # empty/zero in sandbox mode where no reconnect happens.
        self.name = name
        self.host = host
        self.port = port

    def setup(self):
        with self.gui.lock():
            self.gui.setup_postgame()

    def update(self, time):
        pass

    def teardown(self):
        if self.gui.play_again:
            self.gui.teardown_postgame()
            # Reconnect on the next port, matching the server's port chain.
            self.successor = ClientConnectionStage(
                    self.name, self.host, self.port + 1)
            self.exit_stage()
        else:
            self.successor = None

    def is_finished(self):
        return self.gui.postgame_finished()

    def get_successor(self):
        return self.successor
|
stefanv/selective-inference | selection/distributions/discrete_multiparameter.py | Python | bsd-3-clause | 8,134 | 0.003319 | """
This module contains a class for discrete
multiparameter exponential families. The main
use for this is (post-selection)
maximum likelihood estimation.
Unlike the single parameter family,
we do not provide tools for exact
hypothesis tests and selection intervals.
Approximate (Wald) tests and intervals are
possible after estimation.
"""
import numpy as np
import warnings
class multiparameter_family(object):

    def __init__(self, sufficient_stat, weights):
        r"""
        A discrete multi-parameter exponential family with reference
        measure $\sum_j w_j \delta_{X_j}$ and sufficient statistic
        `sufficient_stat`.

        For any $\theta$, the distribution is

        .. math::

            P_{\theta} = \sum_{j} e^{\theta X_j - \Lambda(\theta)} w_j \delta_{X_j}

        where

        .. math::

            \Lambda(\theta) = \log \left(\sum_j w_j e^{\theta X_j} \right).

        Parameters
        ----------

        sufficient_stat : `np.float((n,k))`

        weights : `np.float(n)`

        Notes
        -----

        The weights are normalized to sum to 1.
        """
        sufficient_stat = np.asarray(sufficient_stat)
        self.n, self.k = sufficient_stat.shape

        weights = np.asarray(weights)
        if weights.shape != (self.n,):
            raise ValueError('expecting weights to have same number of rows as sufficient_stat')

        self._w = weights / weights.sum()
        self._x = np.asarray(sufficient_stat)
        # The natural parameter starts "unset" (all NaN); NaN never compares
        # equal, so the first assignment always recomputes the cached pdf.
        self._theta = np.ones(self.k) * np.nan

    @property
    def theta(self):
        """
        The natural parameter of the family.
        """
        return self._theta

    @theta.setter
    def theta(self, _theta):
        # Recompute the cached pdf / partition only when theta changes.
        _theta = np.asarray(_theta)
        if not np.all(np.equal(_theta, self._theta)):
            _thetaX = np.dot(self.sufficient_stat, _theta)
            # Subtract a shift before exponentiating to avoid over/underflow;
            # the shift cancels in the pdf and is multiplied back into the
            # partition below.  (+4 is an arbitrary safety margin.)
            _largest = _thetaX.max() + 4
            _exp_thetaX = np.exp(_thetaX - _largest)
            _prod = _exp_thetaX * self.weights
            self._partition = np.sum(_prod)
            self._pdf = _prod / self._partition
            self._partition *= np.exp(_largest)
        self._theta = _theta

    @property
    def partition(self):
        r"""
        Partition function at `self.theta`:

        .. math::

            \sum_j e^{\theta X_j} w_j
        """
        if hasattr(self, "_partition"):
            return self._partition

    @property
    def sufficient_stat(self):
        """
        Sufficient statistics of the exponential family.
        """
        return self._x

    @property
    def weights(self):
        """
        Weights of the exponential family.
        """
        return self._w

    def pdf(self, theta):
        r"""
        Density of $P_{\theta}$ with respect to $P_0$.

        Parameters
        ----------

        theta : np.float(k)
            Natural parameter.

        Returns
        -------

        pdf : np.float(n)
        """
        self.theta = theta  # compute partition if necessary
        return self._pdf

    def E(self, theta, func):
        r"""
        Expectation of `func` under $P_{\theta}$.

        Parameters
        ----------

        theta : np.float(k)
            Natural parameter.

        func : callable
            Assumed to be vectorized; applied to the (n, k) array of
            sufficient statistics.

        Returns
        -------

        E : np.float
        """
        return (func(self.sufficient_stat) * self.pdf(theta)).sum()

    def Var(self, theta, func):
        r"""
        Variance of `func` under $P_{\theta}$.

        Parameters
        ----------

        theta : np.float(k)
            Natural parameter.

        func : callable
            Assumed to be vectorized.

        Returns
        -------

        var : np.float
        """
        mu = self.E(theta, func)
        return self.E(theta, lambda x: (func(x)-mu)**2)

    def Cov(self, theta, func1, func2):
        r"""
        Covariance of `func1` and `func2` under $P_{\theta}$.

        Parameters
        ----------

        theta : np.float(k)
            Natural parameter.

        func1, func2 : callable
            Assumed to be vectorized.

        Returns
        -------

        cov : np.float
        """
        mu1 = self.E(theta, func1)
        mu2 = self.E(theta, func2)
        return self.E(theta, lambda x: (func1(x)-mu1)*(func2(x)-mu2))

    def mean(self, theta):
        r"""
        Mean parameter of the family at natural parameter `theta`.

        Parameters
        ----------

        theta : np.float(k)
            Natural parameter.

        Returns
        -------

        mean : np.float(k)
            Expected value of sufficient statistic at theta.
        """
        pdf = self.pdf(theta)
        return (self.sufficient_stat * pdf[:,None]).sum(0)

    def information(self, theta):
        r"""
        Compute mean and Fisher information of the family at natural
        parameter `theta`.

        Parameters
        ----------

        theta : np.float(k)
            Natural parameter.

        Returns
        -------

        mean : np.float(k)
            Expected value of sufficient statistic at theta.

        information : np.float((k,k))
            Covariance matrix of sufficient statistic at theta.
        """
        pdf = self.pdf(theta)
        mean = self.mean(theta)
        # E[X X^T] - E[X] E[X]^T, with the outer products weighted by the pdf.
        outer_prods = np.einsum('ij,ik->ijk', self.sufficient_stat, self.sufficient_stat)
        information = (outer_prods * pdf[:,None,None]).sum(0) - np.outer(mean, mean)
        return mean, information

    def MLE(self, observed_sufficient,
            min_iters=3,
            max_iters=20,
            tol=1.e-6,
            initial=None):
        r"""
        Compute the maximum likelihood estimate of the natural parameter
        given an observed sufficient statistic, by damped Newton iteration.

        Parameters
        ----------

        observed_sufficient : np.float(k)
            Observed sufficient statistic.

        min_iters : int
            Minimum number of Newton steps.

        max_iters : int
            Maximum number of Newton steps.

        tol : float
            Relative tolerance for the objective value.

        initial : np.float(k) (optional)
            Starting point; defaults to the current value of ``theta``
            (or zero if ``theta`` has never been set).

        Returns
        -------

        theta_hat : np.float(k)
            Maximum likelihood estimate.  The family's state (``theta``
            and cached pdf/partition) is restored before returning.
        """
        if initial is None:
            if np.all(np.isnan(self.theta)):
                # Bug fix: this used to be ``np.zeros(k)`` with ``k``
                # undefined, raising NameError whenever theta was unset.
                initial = np.zeros(self.k)
            else:
                initial = self.theta

        # Make sure the cached pdf/partition exist before snapshotting them;
        # they are created lazily by the theta setter.
        if not hasattr(self, '_pdf'):
            self.theta = initial

        # Snapshot state so the family is left unchanged on return.
        _old_theta, _old_pdf, _old_partition = (self._theta.copy(),
                                                self._pdf.copy(),
                                                self._partition)

        theta = initial
        value = np.inf

        for i in range(max_iters):
            mean, hess = self.information(theta)
            grad = mean - observed_sufficient
            direction = np.linalg.solve(hess, grad)

            # Backtracking line search on the convex objective
            # Lambda(theta) - <theta, observed_sufficient>.
            step_size = 1.
            while True:
                proposed_theta = theta - step_size * direction
                self.theta = proposed_theta
                proposed_value = np.log(self.partition) - (self.theta * observed_sufficient).sum()
                # Accept once the objective improves; the step-size floor
                # guarantees termination when no improvement is possible
                # (e.g. the iterate is already the optimizer -- the original
                # spun forever in that case).
                if proposed_value < value or step_size < tol:
                    break
                else:
                    step_size *= 0.9

            # Stop once the relative decrease of the objective is below tol.
            # Written multiplicatively so an exact tie (or value == 0) also
            # terminates instead of producing a 0/0.
            if i > min_iters and np.fabs(value - proposed_value) <= tol * np.fabs(value):
                break

            value = proposed_value
            theta = proposed_theta

        # Restore the snapshot taken above.
        self._theta, self._pdf, self._partition = (_old_theta,
                                                   _old_pdf,
                                                   _old_partition)

        return theta
|
ArtemMIPT/sentiment_analysis | app.py | Python | mit | 4,310 | 0.00464 | import functools
import re
import os
from flask import Flask, render_template, request, jsonify
from extensions import db, login_manager, csrf
import config
from sentiment_classifiers import SentimentClassifier, files, binary_dict
from vk_parser import VkFeatureProvider
app = Flask(__name__)
###############################################################################
# App managing function
###############################################################################
def create_app(cfg=None, app_name=None):
    """Create and configure a Flask app (application-factory pattern)."""
    if app_name is None:
        app_name = config.DefaultConfig.PROJECT
    app = Flask(app_name) # TODO: check params
    configure_app(app, cfg)
    configure_extensions(app)
    return app
def configure_app(app, cfg=None):
    """Apply the base config, then overlay the APPLICATION_MODE-specific one."""
    if not cfg:
        cfg = config.DefaultConfig
    app.config.from_object(cfg)
    # Environment variable selects LOCAL / deployment-specific overrides.
    application_mode = os.getenv('APPLICATION_MODE', 'LOCAL')
    app.config.from_object(config.get_config(application_mode))
def configure_extensions(app):
    """Bind the shared extensions (db, login manager, CSRF) to *app*."""
    db.init_app(app)

    login_manager.login_view = 'frontend.login'
    login_manager.refresh_view = 'frontend.login'

    # Bug fix: the user loader must be registered with the ``user_loader``
    # decorator; a bare ``@login_manager`` calls the LoginManager instance
    # itself and raises a TypeError at import time.
    @login_manager.user_loader
    def load_user(id):
        pass  # TODO: look the user up, e.g. ``return user.query.get(id)``

    # NOTE(review): ``setup_app`` is the deprecated Flask-Login spelling of
    # ``init_app``; consider switching once the installed version is known.
    login_manager.setup_app(app)
    csrf.init_app(app)
###############################################################################
#
###############################################################################
def return_json(func):
    """Decorator: JSON-serialize the wrapped view's return value via ``jsonify``."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return jsonify(func(*args, **kwargs))
    return wrapper
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/get_vk_json', methods=['GET', 'POST'])
@return_json
def get_vk_info():
    """Fetch recent posts from the requested VK publics and return them as JSON."""
    if request.method != 'POST':
        return None
    provider = VkFeatureProvider()
    publics = request.form.get('publics', None)
    num_posts = request.form.get('num_posts', None)
    if not publics or not num_posts:
        raise ValueError('Arguments are wrong publics: {}, num_posts: {}'
                         .format(publics, num_posts))
    # Split the separated list of public names into individual tokens.
    publics = re.findall(r'[\w.]+', publics)
    return provider.get_news(publics, int(num_posts))
###############################################################################
# Models
###############################################################################
@app.route('/vk_sentiment', methods=['GET', 'POST'])
def vk_sentiment():
    """Render the VK sentiment analysis page."""
    return render_template('vk_sentiment.html')
@app.route('/bank_sentiment', methods=['GET', 'POST'])
def bank_sentiment():
    """Bank-review sentiment page.

    NOTE(review): no classifier is wired up yet (the commented-out line below
    is incomplete), so ``prediction_message`` is always empty.
    """
    #classifier = SentimentClassifier(files[')
    text = ''
    prediction_message = ''
    if request.method == 'POST':
        text = request.form['text']
    return render_template('bank_sentiment.html', text=text, prediction_message=prediction_message)
@app.route('/movie_binary_sentiment', methods=['GET', 'POST'])
def movie_binary_sentiment():
    """Binary (positive/negative) sentiment page for movie reviews."""
    # NOTE(review): the classifier is rebuilt on every request; consider
    # caching it at module level if model loading is expensive.
    classifier = SentimentClassifier(files['binary_movie'], binary_dict)
    text = ''
    prediction_message = ''
    if request.method == 'POST':
        text = request.form['text']
        prediction_message = classifier.get_prediction_message(text)
    return render_template('movie_binary_sentiment.html', text=text, prediction_message=prediction_message)
@app.route('/goods_binary_sentiment', methods=['GET', 'POST'])
def goods_binary_sentiment():
classifier = SentimentClassifier(files['binary_goods'], binary_dict)
text = ''
prediction_message = ''
if request.method == 'P | OST':
text = request.form['text']
prediction_message = classifier.get_prediction_message(text)
return render_template('goods_binary_sentiment.html', text=text, prediction_message=prediction_message)
###############################################################################
# Info pages
### | ############################################################################
@app.route('/algo.html')
def algo():
return render_template('algo.html')
@app.route('/about.html')
def about():
return render_template('about.html')
if __name__ == "__main__":
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
app.debug = True
app.run()
|
terracoin/terracoin | qa/rpc-tests/nodehandling.py | Python | mit | 3,440 | 0.011047 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test node handling
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class NodeHandlingTest (BitcoinTestFramework):
def run_test(self):
###########################
# setban/listbanned tests #
###########################
assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
self.nodes[2].setban("127.0.0.1", "add")
time.sleep(3) #wait till the nodes are disconected
assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].setban("127.0.0.0/24", "add")
assert_equal(len(self.nodes[2].listbanned()), 1)
try:
self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
try:
self.nodes[2].setban("127.0.0.1", "remove")
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
##test persisted banlist
self.nodes[2].setban("127.0.0.0/32", "add")
self.nodes[2].setban("127.0.0.0/24", "add")
self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
listBeforeShutdown = self.nodes[2].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) #must be here
time.sleep( | 2) #make 100% sure we expired 192.168.0.1 node time
#stop node
stop_node(self.nodes[2], 2)
self.nodes[2] = start_node(2, self.options.tmpdir)
listAfterShutdown = self.nodes[2].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
########################### |
# RPC disconnectnode test #
###########################
url = urllib.parse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
time.sleep(2) #disconnecting a node needs a little bit of time
for node in self.nodes[0].getpeerinfo():
assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
connect_nodes_bi(self.nodes,0,1) #reconnect the node
found = False
for node in self.nodes[0].getpeerinfo():
if node['addr'] == url.hostname+":"+str(p2p_port(1)):
found = True
assert(found)
if __name__ == '__main__':
NodeHandlingTest ().main ()
|
15th/simpleat | simpleat/core/log/writer.py | Python | mit | 1,220 | 0.00838 | #!/usr/bin/env python
#coding=utf-8
# Filename: writer.py
'''
日志记录
@author: 1th
@data: 2017.2.28
'''
from time import sleep
import datetime
from simpleat.conf import settings, globalvar
from simpleat.core import exceptions
from .logger import write_log
_CMD_OUT = settings.CMD_OUT # 是否在命令行进行输出
_LOG_OUT = settings.LOG_OUT # 程序运行过程中是否开启日志输出
_LOG_DIR = settings.LOG_DIR # log文件存储文件夹
def log(logmsg, level, logstr=_LOG_DIR):
'''
纪录日志,自动获取当前时间
Args:
level: 日志等级
logstr: log文件文件夹
'''
fulllogmsg = ''.join(['[', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), \
'] - ', level, ' - ', unicode(logmsg)])
try:
write_log(fulllogmsg, level, logstr)
except exceptions.WriteLogException as wle:
if _CMD_OUT:
pr | int unicode(wle)
def logger():
'''
自动检查LOG_MESSAGE中是否有需要记录的日志
'''
while True:
if not globalvar.g_hold_lognote.empty():
| content, level = globalvar.g_hold_lognote.get()
if _LOG_OUT:
log(content, level)
sleep(0.5)
|
asposecells/Aspose_Cells_Cloud | Examples/Python/Examples/SetModifyPassword.py | Python | mit | 1,577 | 0.008878 | import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
fr | om asposecellscloud.CellsApi import ApiException
from asposecellscloud.models import PasswordRequest
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServe | r = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Cells API SDK
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
#set input file name
filename = "Sample_Test_Book.xls"
body = PasswordRequest.PasswordRequest()
body.Password = "aspose"
#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Cells Cloud SDK API to set modify password of a workbook
response = cellsApi.PutDocumentProtectFromChanges(name=filename, body=body)
if response.Status == "OK":
#download protected document from cloud storage
response = storageApi.GetDownload(Path=filename)
outfilename = "c:/temp/" + "password_protected_" + filename
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
|
Linkid/fofix | fofix/game/Lobby.py | Python | gpl-2.0 | 37,782 | 0.007967 | #####################################################################
# -*- coding: utf-8 -*- #
# #
# Frets on Fire X #
# Copyright (C) 2006 Sami Kyöstilä #
# 2008 rchiav #
# 2009 Team FoFiX #
# 2009 akedrou #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
from __future__ import with_statement
import shutil
import os
from OpenGL.GL import *
import pygame
from fofix.core.Player import GUITARTYPES, DRUMTYPES, MICTYPES
from fofix.core.Input import KeyListener
from fofix.core.Image import drawImage
from fofix.core.constants import *
from fofix.core.Language import _
from fofix.core.View import Layer
from fofix.game import Dialogs
from fofix.core import Player
from fofix.game import song
class WorldNotStarted(Exception):
def __str__(self):
return _("World error. Please try again.")
class Lobby(Layer, KeyListener):
def __init__(self, engine):
if not engine.world:
raise WorldNotStarted
self.engine = engine
self.minPlayers = self.engine.world.minPlayers
self.maxPlayers = self.engine.world.maxPlayers
self.tutorial = self.engine.world.tutorial
self.gameMode = self.engine.world.gameMode
self.multiMode = self.engine.world.multiMode
self.time = 0.0
self.keyControl = 0
self.keyGrab = False
self.scrolling = [0,0,0,0]
self.rate = [0,0,0,0]
self.delay = [0,0,0,0]
self.scroller = [0, self.scrollUp, self.scrollDown]
self.gameStarted = False
self.done = True
self.active = False
self.blockedItems = [1]
self.selectedItems = []
self.blockedPlayers = []
self.selectedPlayers = []
self.playerList = [None for i in range(4)]
self.fullView = self.engine.view.geometry[2:4]
self.music = True
self.creator = CreateCharacter(self.engine)
#key vars
self.fontDict = self.engine.data.fontDict
self.geometry = self.engine.view.geometry[2:4]
self.fontScreenBottom = self.engine.data.fontScreenBottom
self.aspectRatio = self.engine.view.aspectRatio
self.drawStarScore = self.engine.drawStarScore
self.gameModeText = self.engine.world.gameName
self.yes = []
self.no = []
self.conf = []
self.up = []
self.down = []
self.controls = [j for j in self.engine.input.controls.controls]
self.types = []
self.allowed = [True for i in range(4)]
for i, type in enumerate(self.engine.input.controls.type):
self.types.append(type)
if type in GUITARTYPES:
if not self.engin | e.world.allowGuitar:
self.allowed[i] = False
else:
self.yes.extend([Player.CONTROLS[i][Player.KEY1], Play | er.CONTROLS[i][Player.KEY1A], Player.CONTROLS[i][Player.START]])
self.no.extend([Player.CONTROLS[i][Player.KEY2], Player.CONTROLS[i][Player.KEY2A], Player.CONTROLS[i][Player.CANCEL]])
self.conf.extend([Player.CONTROLS[i][Player.KEY3], Player.CONTROLS[i][Player.KEY3A]])
self.up.extend([Player.CONTROLS[i][Player.ACTION1], Player.CONTROLS[i][Player.UP]])
self.down.extend([Player.CONTROLS[i][Player.ACTION2], Player.CONTROLS[i][Player.DOWN]])
elif type in DRUMTYPES:
if not self.engine.world.allowDrum:
self.allowed[i] = False
else:
self.yes.extend([Player.CONTROLS[i][Player.DRUM5], Player.CONTROLS[i][Player.DRUM5A], Player.CONTROLS[i][Player.START]])
self.no.extend([Player.CONTROLS[i][Player.DRUM1], Player.CONTROLS[i][Player.DRUM1A], Player.CONTROLS[i][Player.CANCEL]])
self.conf.extend([Player.CONTROLS[i][Player.DRUMBASS], Player.CONTROLS[i][Player.DRUMBASSA]])
self.up.extend([Player.CONTROLS[i][Player.DRUM2], Player.CONTROLS[i][Player.DRUM2A], Player.CONTROLS[i][Player.UP]])
self.down.extend([Player.CONTROLS[i][Player.DRUM3], Player.CONTROLS[i][Player.DRUM3A], Player.CONTROLS[i][Player.DOWN]])
elif type in MICTYPES:
if not self.engine.world.allowMic:
self.allowed[i] = False
else:
self.yes.extend([Player.CONTROLS[i][Player.START]])
self.no.extend([Player.CONTROLS[i][Player.CANCEL]])
self.up.extend([Player.CONTROLS[i][Player.UP]])
self.down.extend([Player.CONTROLS[i][Player.DOWN]])
for i, control in enumerate(self.engine.input.controls.controls):
if control == "None":
self.controls[i] = _("No Controller")
self.blockedPlayers.append(i)
elif not self.allowed[i]:
self.controls[i] = _("Disabled Controller")
self.blockedPlayers.append(i)
elif control == "defaultg":
self.controls[i] = _("Default Guitar")
elif control == "defaultd":
self.controls[i] = _("Default Drums")
elif control == "defaultm":
self.controls[i] = _("Default Microphone")
if 4 - len(self.blockedPlayers) < self.minPlayers:
Dialogs.showMessage(self.engine, _("Your controls are not properly set up for this mode. Please check your settings."))
#FIXME: Go back to the main menu (or the control menu) without screwing up the layers.
self.engine.input.activeGameControls = [i for i in range(4) if i not in self.blockedPlayers]
self.engine.input.pluginControls()
self.panelOrder = range(4)
self.oldOrder = range(4)
themename = self.engine.data.themeLabel
self.theme = self.engine.theme
self.engine.data.loadAllImages(self, os.path.join("themes",themename,"lobby"))
self.partImages = self.engine.data.partImages
if not self.img_default_av:
self.engine.data.loadImgDrawing(self, "img_default_av", os.path.join("users", "players", "default.png"))
if not self.img_newchar_av:
self.engine.data.loadImgDrawing(self, "img_newchar_av", os.path.join("users", "players", "newchar_av.png"))
if self.img_default_av:
imgheight = self.img_default_av.height1()
imgwidth = self.img_default_av.width1()
hFactor = self.theme.lobbyPanelAvatarDimension[1]/imgheight
wFactor = self.theme.lobbyPanelAvatarDimension |
gtrdotmcs/gunicorn | tests/test_gaiohttp.py | Python | mit | 6,628 | 0.001056 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import unittest
import pytest
aiohttp = pytest.importorskip("aiohttp")
from aiohttp.wsgi import WSGIServerHttpProtocol
import asyncio
from gunicorn.workers import gaiohttp
from gunicorn.workers._gaiohttp import _wrp
from gunicorn.config import Config
from unittest import mock
class WorkerTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.worker = gaiohttp.AiohttpWorker('age',
'ppid',
| 'sockets',
'app',
'timeout',
Config(),
'log')
def tearDown(self):
self.loop.close()
@mock.patch('gunicorn.workers._gaiohttp.asyncio')
def test_init_process(self, m_asyncio):
try:
self.worker.init_process()
except TypeError:
| # to mask incomplete initialization of AiohttWorker instance:
# we pass invalid values for ctor args
pass
self.assertTrue(m_asyncio.get_event_loop.return_value.close.called)
self.assertTrue(m_asyncio.new_event_loop.called)
self.assertTrue(m_asyncio.set_event_loop.called)
@mock.patch('gunicorn.workers._gaiohttp.asyncio')
def test_run(self, m_asyncio):
self.worker.loop = mock.Mock()
self.worker.run()
self.assertTrue(m_asyncio.async.called)
self.assertTrue(self.worker.loop.run_until_complete.called)
self.assertTrue(self.worker.loop.close.called)
def test_factory(self):
self.worker.wsgi = mock.Mock()
self.worker.loop = mock.Mock()
self.worker.log = mock.Mock()
self.worker.cfg = mock.Mock()
f = self.worker.factory(
self.worker.wsgi, ('localhost', 8080))
self.assertIsInstance(f, WSGIServerHttpProtocol)
@mock.patch('gunicorn.workers._gaiohttp.asyncio')
def test__run(self, m_asyncio):
self.worker.ppid = 1
self.worker.alive = True
self.worker.servers = []
sock = mock.Mock()
sock.cfg_addr = ('localhost', 8080)
self.worker.sockets = [sock]
self.worker.wsgi = mock.Mock()
self.worker.log = mock.Mock()
self.worker.notify = mock.Mock()
loop = self.worker.loop = mock.Mock()
loop.create_server.return_value = asyncio.Future(loop=self.loop)
loop.create_server.return_value.set_result(sock)
self.loop.run_until_complete(self.worker._run())
self.assertTrue(self.worker.log.info.called)
self.assertTrue(self.worker.notify.called)
@mock.patch('gunicorn.workers._gaiohttp.asyncio')
def test__run_unix_socket(self, m_asyncio):
self.worker.ppid = 1
self.worker.alive = True
self.worker.servers = []
sock = mock.Mock()
sock.cfg_addr = '/tmp/gunicorn.sock'
self.worker.sockets = [sock]
self.worker.wsgi = mock.Mock()
self.worker.log = mock.Mock()
self.worker.notify = mock.Mock()
loop = self.worker.loop = mock.Mock()
loop.create_server.return_value = asyncio.Future(loop=self.loop)
loop.create_server.return_value.set_result(sock)
self.loop.run_until_complete(self.worker._run())
self.assertTrue(self.worker.log.info.called)
self.assertTrue(self.worker.notify.called)
def test__run_connections(self):
conn = mock.Mock()
self.worker.ppid = 1
self.worker.alive = False
self.worker.servers = [mock.Mock()]
self.worker.connections = {1: conn}
self.worker.sockets = []
self.worker.wsgi = mock.Mock()
self.worker.log = mock.Mock()
self.worker.loop = self.loop
self.worker.loop.create_server = mock.Mock()
self.worker.notify = mock.Mock()
def _close_conns():
self.worker.connections = {}
self.loop.call_later(0.1, _close_conns)
self.loop.run_until_complete(self.worker._run())
self.assertTrue(self.worker.log.info.called)
self.assertTrue(self.worker.notify.called)
self.assertFalse(self.worker.servers)
self.assertTrue(conn.closing.called)
@mock.patch('gunicorn.workers._gaiohttp.os')
@mock.patch('gunicorn.workers._gaiohttp.asyncio.sleep')
def test__run_exc(self, m_sleep, m_os):
m_os.getpid.return_value = 1
m_os.getppid.return_value = 1
self.worker.servers = [mock.Mock()]
self.worker.ppid = 1
self.worker.alive = True
self.worker.sockets = []
self.worker.log = mock.Mock()
self.worker.loop = mock.Mock()
self.worker.notify = mock.Mock()
slp = asyncio.Future(loop=self.loop)
slp.set_exception(KeyboardInterrupt)
m_sleep.return_value = slp
self.loop.run_until_complete(self.worker._run())
self.assertTrue(m_sleep.called)
self.assertTrue(self.worker.servers[0].close.called)
def test_close_wsgi_app(self):
self.worker.ppid = 1
self.worker.alive = False
self.worker.servers = [mock.Mock()]
self.worker.connections = {}
self.worker.sockets = []
self.worker.log = mock.Mock()
self.worker.loop = self.loop
self.worker.loop.create_server = mock.Mock()
self.worker.notify = mock.Mock()
self.worker.wsgi = mock.Mock()
self.worker.wsgi.close.return_value = asyncio.Future(loop=self.loop)
self.worker.wsgi.close.return_value.set_result(1)
self.loop.run_until_complete(self.worker._run())
self.assertTrue(self.worker.wsgi.close.called)
self.worker.wsgi = mock.Mock()
self.worker.wsgi.close.return_value = asyncio.Future(loop=self.loop)
self.worker.wsgi.close.return_value.set_exception(ValueError())
self.loop.run_until_complete(self.worker._run())
self.assertTrue(self.worker.wsgi.close.called)
def test_wrp(self):
conn = object()
tracking = {}
meth = mock.Mock()
wrp = _wrp(conn, meth, tracking)
wrp()
self.assertIn(id(conn), tracking)
self.assertTrue(meth.called)
meth = mock.Mock()
wrp = _wrp(conn, meth, tracking, False)
wrp()
self.assertNotIn(1, tracking)
self.assertTrue(meth.called)
|
carlosbeatortega/sociedades | web/node/node_modules/npm/node_modules/node-gyp/legacy/tools/gyp/pylib/gyp/MSVSSettings.py | Python | mit | 42,889 | 0.002868 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return m | sbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
R | aises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x'
return msbuild_format % int(value)
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
|
MichelDeudon/neural-combinatorial-optimization-rl-tensorflow | Self_Net_TSP/config.py | Python | mit | 3,341 | 0.022448 | #-*- coding: utf-8 -*-
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='Configuration file')
arg_lists = []
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
def str2bool(v):
return v.lower() in ('true', '1')
# Network
net_arg = add_argument_group('Network')
net_arg.add_argument('--hidden_dim', type=int, default=128, help='actor LSTM num_neurons')
net_arg.add_argument('--num_heads', type=int, default=16, help='actor input embedding') ###
net_arg.add_argument('--num_stacks', type=int, default=3, help='actor LSTM num_neurons')
# Data
data_arg = add_argument_group('Data')
data_arg.add_argument('--batch_size', type=int, default=128, help='batch size')
data_arg.add_argument('--input_dimension', type=int, default=2, help='city dimension')
data_arg.add_argument('--max_length', type=int, default=20, help='number of deliveries')
# Training / test parameters
train_arg = add_argument_group('Training')
train_arg.add_argument('--nb_epoch', type=int, default=100000, help='nb epoch')
train_arg.add_argument('--lr1_start', type=float, default=0.001, help='actor learning rate')
train_arg.add_argument('--lr1_decay_step', type=int, default=5000, help='lr1 decay step')
train_arg.add_argument('--lr1_decay_rate', type=float, default=0.96, help='lr1 decay rate')
train_arg.add_argument('--alpha', type=float, default=0.99, help='update factor moving average baseline')
train_arg.add_argument('--init_baseline', type=float, default=7.0, help='initial baseline - REINFORCE')
train_arg.add_argument('--temperature', type=float, default=3.0, help='pointer_net initial temperature')
train_arg.add_argument('--C', type=float, default=10.0, help='pointer_net tan clipping')
# Misc
misc_arg = | add_argument_group('User options') #####################################################
misc_arg.add_argument('--inference_mode', type=str2bool, default=True, help='switch to inference mode when model is trained')
misc_arg.add_argum | ent('--restore_model', type=str2bool, default=True, help='whether or not model is retrieved')
misc_arg.add_argument('--save_to', type=str, default='20/model', help='saver sub directory')
misc_arg.add_argument('--restore_from', type=str, default='20/model', help='loader sub directory') ###
misc_arg.add_argument('--log_dir', type=str, default='summary/20/repo', help='summary writer log directory')
def get_config():
config, unparsed = parser.parse_known_args()
return config, unparsed
def print_config():
config, _ = get_config()
print('\n')
print('Data Config:')
print('* Batch size:',config.batch_size)
print('* Sequence length:',config.max_length)
print('* City coordinates:',config.input_dimension)
print('\n')
print('Network Config:')
print('* Restored model:',config.restore_model)
print('* Actor hidden_dim (embed / num neurons):',config.hidden_dim)
print('* Actor tan clipping:',config.C)
print('\n')
if config.inference_mode==False:
print('Training Config:')
print('* Nb epoch:',config.nb_epoch)
print('* Temperature:',config.temperature)
print('* Actor learning rate (init,decay_step,decay_rate):',config.lr1_start,config.lr1_decay_step,config.lr1_decay_rate)
else:
print('Testing Config:')
print('* Summary writer log dir:',config.log_dir)
print('\n') |
max1k/cbs | cbs/settings.py | Python | gpl-2.0 | 2,196 | 0 | """
Django settings for cbs project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '51ff&6zumcwpo8+60&5+dg5nqh6-ehdo@uk-xi$*paicy7b4e%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib | .contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'p311',
'p365',
)
MIDDLEWARE_CLASSES = (
'django.contrib. | sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cbs.urls'
WSGI_APPLICATION = 'cbs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "templates"),
)
|
mapzen/vector-datasource | integration-test/1178-earlier-piers.py | Python | mit | 3,414 | 0 | # -*- encoding: utf-8 -*-
from shapely.wkt import loads as wkt_loads
import dsl
from . import FixtureTest
class EarlierPiers(FixtureTest):
    """Check the zoom at which piers first appear in the landuse layer.

    Larger piers should appear earlier (lower min_zoom); the expected
    min_zoom values below track the piers' way_area.
    """

    def test_very_large_pier(self):
        # a very, very large pier which alters the coastline visually, so
        # should be kept until z11.
        self.generate_fixtures(dsl.way(377915546, wkt_loads('POLYGON ((121.369997686123 25.16007715057551, 121.389781463288 25.16886019888699, 121.393176645904 25.16588309891378, 121.393223358299 25.16575162904959, 121.393218956554 25.1655327562094, 121.380700483913 25.16002364948639, 121.380536810868 25.16032132014178, 121.371928345332 25.15653415695679, 121.369997686123 25.16007715057551))'), {u'name': u'\u7b2c\u4e00\u8ca8\u6ac3\u5132\u904b\u4e2d\u5fc3', u'area': u'yes', u'way_area': u'1.31001e+06', u'man_made': u'pier', u'source': u'openstreetmap.org', u'operator': u'\u53f0\u5317\u6e2f\u8ca8\u6ac3\u78bc\u982d\u516c\u53f8'})) # noqa

        self.assert_has_feature(
            11, 1714, 876, 'landuse',
            {'id': 377915546, 'kind': 'pier', 'min_zoom': 11})

    def test_cruise_terminal(self):
        # zoom 11 for Cruise Terminal with area 53,276
        self.generate_fixtures(dsl.way(275609726, wkt_loads('POLYGON ((-117.176719701526 32.7179229716355, -117.176661221201 32.71797111545498, -117.173474717225 32.7179511626036, -117.173464925588 32.7168440746718, -117.17667819936 32.71684596416999, -117.176703711514 32.7168737019992, -117.176719701526 32.7179229716355))'), {u'source': u'openstreetmap.org', u'way_area': u'53276.3', u'man_made': u'pier', u'name': u'Cruise Terminal'})) # noqa

        self.assert_has_feature(
            11, 357, 826, 'landuse',
            {'id': 275609726, 'kind': 'pier', 'min_zoom': 11.22})

    def test_broadway_pier(self):
        # zoom 12 for Broadway Pier with area 17,856
        self.generate_fixtures(dsl.way(275609725, wkt_loads('POLYGON ((-117.176717725232 32.71556026475498, -117.176717725232 32.71586825686789, -117.176676223066 32.7158965240661, -117.173469596828 32.71589319851391, -117.173405187622 32.7158976577771, -117.173402492676 32.71553320674671, -117.17348028678 32.71552640445288, -117.173583593037 32.7155264800339, -117.176681523126 32.7155310148966, -117.176717725232 32.71556026475498))'), {u'source': u'openstreetmap.org', u'way_area': u'17855.9', u'man_made': u'pier', u'name': u'Broadway Pier'})) # noqa

        self.assert_has_feature(
            12, 714, 1653, 'landuse',
            {'id': 275609725, 'kind': 'pier', 'min_zoom': 12})

    def test_smaller_unnamed_pier(self):
        # zoom 12 for unnamed pier with area 4,734
        self.generate_fixtures(dsl.way(275609722, wkt_loads('POLYGON ((-117.175814469214 32.71132656919218, -117.175718529142 32.71141069481718, -117.17566966079 32.71143488186461, -117.175632380706 32.7114030607791, -117.175599322703 32.7113747921568, -117.175648909707 32.71133820922049, -117.175576505495 32.7112800846483, -117.175587195447 32.71127222384509, -117.174076408802 32.7098766179425, -117.174094464939 32.7098630124915, -117.173743762653 32.70953239939531, -117.173824341534 32.70947366895718, -117.175814469214 32.71132656919218))'), {u'source': u'openstreetmap.org', u'way_area': u'4734.17', u'man_made': u'pier', u'area': u'yes'})) # noqa

        self.assert_has_feature(
            12, 714, 1653, 'landuse',
            {'id': 275609722, 'kind': 'pier', 'min_zoom': 12.96})
|
Ecpy/ecpy | exopy/instruments/user.py | Python | bsd-3-clause | 1,868 | 0 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Declaration of plugin susceptible to use instruments
"""
from atom.api import Enum, Str
from enaml.core.declarative import Declarative, d_, d_func
class InstrUser(Declarative):
    """Extension to the 'exopy.instruments.users' extensions point.

    Contributing an InstrUser declares that a plugin may request the use
    of instrument profiles from the manager.
    """
    #: Plugin id associated with this use of instrument. This allows the
    #: manager to know what part of the application requested the right to
    #: use some drivers.
    id = d_(Str())

    #: Is the plugin susceptible to release the profiles it is currently using
    #: if the manager asks it to do so.
    policy = d_(Enum('releasable', 'unreleasable'))

    @d_func
    def release_profiles(self, workbench, profiles):
        """Release the specified profiles or some of them.

        This call can block until the profiles can be released (if it is likely
        to happen in a relatively short time). The
        'exopy.instruments.notify_profiles_released' command should not be
        called (the manager knows what profiles it requested and will handle
        the tracking of the current user for each profile itself).

        Parameters
        ----------
        workbench :
            Application workbench.

        profiles : list[unicode]
            List of profiles the manager is requesting the user to release.

        Returns
        -------
        released_profiles : list[unicode]
            List of the profiles that have been released.

        """
        raise NotImplementedError()
|
battlemidget/conjure-up | macumba/ws.py | Python | mit | 2,690 | 0 | from ws4py.client.threadedclient import WebSocketClient
import json
import threading
import logging
from .errors import ConnectionClosedError, UnknownRequestError
log = logging.getLogger('macumba')
class JujuWS(WebSocketClient):
    """Threaded websocket client used to talk to the Juju API.

    ws4py delivers the `opened`/`received_message`/`closed` callbacks on a
    private reader thread.  Replies are stashed in `self.messages`, keyed
    by their RequestId, and callers poll them via `do_receive`.
    """

    def __init__(self, url, password, protocols=None,
                 extensions=None, ssl_options=None, headers=None,
                 start_reqid=1):
        # Fix: `protocols` previously defaulted to the mutable list
        # ['https-only'], shared by every instance.  Use None as sentinel.
        if protocols is None:
            protocols = ['https-only']
        WebSocketClient.__init__(self, url, protocols, extensions,
                                 ssl_options=ssl_options, headers=headers)

        # Set by the reader thread in opened(); do_connect blocks on it.
        self.open_done = threading.Event()
        self.rid_lock = threading.RLock()   # guards _cur_request_id
        self.msglock = threading.RLock()    # guards the messages dict
        # request id -> reply dict, or None while the reply is pending
        self.messages = {}
        self._cur_request_id = start_reqid

    # WebSocketClient subclass overrides, run in private thread:
    def opened(self):
        self.open_done.set()

    def received_message(self, m):
        msg = json.loads(m.data.decode('utf-8'))
        msg_req_id = msg['RequestId']
        with self.msglock:
            self.messages[msg_req_id] = msg

    def closed(self, code, reason=None):
        log.debug("socket closed: code:{} reason:{}".format(code, reason))

    # actions for users of the class:
    def get_current_request_id(self):
        "only intended to pass to constructor of a replacing client"
        return self._cur_request_id

    def do_close(self):
        self.close()

    def do_connect(self, creds):
        """Connect, wait for the socket to open, then send `creds`.

        Returns the request id assigned to the credentials message.
        """
        self.connect()
        self.open_done.wait()
        self.open_done.clear()
        rv = self.do_send(creds)
        return rv

    def do_send(self, json_message):
        """Stamp `json_message` with the next RequestId and send it.

        Returns the request id, which the caller passes to do_receive.
        """
        with self.rid_lock:
            self._cur_request_id += 1
            request_id = self._cur_request_id
        json_message['RequestId'] = request_id
        # Fix: register the pending slot BEFORE sending.  Previously the
        # slot was written after send(), so a fast reply handled on the
        # reader thread could be overwritten with None and lost.
        with self.msglock:
            self.messages[request_id] = None
        self.send(json.dumps(json_message))
        return request_id

    def do_receive(self, request_id):
        """Checks for message matching request_id.

        Will return None if message has not arrived yet.

        Raises UnknownRequestError if request_id hasn't been sent yet
        (or was already received).
        """
        if self.terminated:
            raise ConnectionClosedError
        with self.msglock:
            if request_id not in self.messages:
                errmsg = ("{} not in messages. "
                          "cur = {}".format(request_id,
                                            self._cur_request_id))
                raise UnknownRequestError(errmsg)
            message = self.messages[request_id]
            if message is not None:
                del self.messages[request_id]
        return message
|
sysr-q/fah | fah/room.py | Python | mit | 759 | 0.02635 | #!/usr/bin/env python2
from __future__ import print_function
from collections import defaultdict
import uuid
class Room(object):
    """Mutable state for a single game room."""

    def __init__(self):
        # Name is assigned after construction; no users yet.
        self.name = None
        self.users = {}
        # Enabled expansion ids; 0 is the base set.
        self.expansions = [0]
class User(object):
    """A connected player, identified by a freshly generated UUID.

    Users are unique identities: copying a User (shallowly or deeply)
    yields the very same object rather than a clone.
    """

    def __init__(self, handle, trip=None, first=False):
        self.uuid = uuid.uuid4()
        self.handle = handle
        self.trip = trip
        self.first = first
        self.rooms = []  # {name: _, owner: _}
        self.cards = []  # list of ids (usually 10)

    def __new__(cls, *args, **kwargs):
        # Fix: object.__new__ must not receive the constructor arguments;
        # forwarding them raises TypeError on Python 3.
        return super(User, cls).__new__(cls)

    def __copy__(self):
        return self

    def __deepcopy__(self, memo):
        # Fix: copy.deepcopy always passes a memo dict; the old signature
        # omitted it, so deepcopy of a User raised TypeError.
        return self
|
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/site-packages/cryptography-0.5.2-py2.7-linux-x86_64.egg/cryptography/hazmat/primitives/serialization.py | Python | apache-2.0 | 909 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
def load_pem_traditional_openssl_private_key(data, password, backend):
    """Deserialize a traditional-OpenSSL-format PEM private key.

    The actual parsing is delegated to *backend*.
    """
    return backend.load_traditional_openssl_pem_private_key(data, password)
def load_pem_pkcs8_private_key(data, password, backend):
    """Deserialize a PKCS#8-format PEM private key.

    The actual parsing is delegated to *backend*.
    """
    return backend.load_pkcs8_pem_private_key(data, password)
|
tanutarou/OptBoard | optboard/wsgi.py | Python | mit | 394 | 0 | """
WSGI config for optboard project.
It exposes the WSGI callable as a module-leve | l variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto | /deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "optboard.settings")
application = get_wsgi_application()
|
zozo123/buildbot | master/buildbot/schedulers/base.py | Python | gpl-3.0 | 10,933 | 0.000823 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot import config
from buildbot import interfaces
from buildbot.changes import changes
from buildbot.process.properties import Properties
from buildbot.util.service import ClusteredService
from buildbot.util.state import StateMixin
from twisted.internet import defer
from twisted.python import failure
from twisted.python import log
from zope.interface import implements
class BaseScheduler(ClusteredService, StateMixin):
implements(interfaces.IScheduler)
DEFAULT_CODEBASES = {'': {}}
compare_attrs = ClusteredService.compare_attrs + \
('builderNames', 'properties', 'codebases')
    def __init__(self, name, builderNames, properties,
                 codebases=DEFAULT_CODEBASES):
        """Set up the scheduler.

        @param name: scheduler name; also exposed as the 'scheduler' property
        @param builderNames: list (or tuple) of builder name strings
        @param properties: dict of build properties contributed by this
            scheduler
        @param codebases: dict mapping codebase name to a dict of sourcestamp
            defaults; every entry other than the default one must contain a
            'repository' key
        """
        ClusteredService.__init__(self, name)
        # builderNames must be a list/tuple of strings; anything else is a
        # configuration error
        ok = True
        if not isinstance(builderNames, (list, tuple)):
            ok = False
        else:
            for b in builderNames:
                if not isinstance(b, basestring):
                    ok = False
        if not ok:
            config.error(
                "The builderNames argument to a scheduler must be a list "
                "of Builder names.")
        self.builderNames = builderNames
        self.properties = Properties()
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")
        self.objectid = None
        self.master = None
        # Set the codebases that are necessary to process the changes
        # These codebases will always result in a sourcestamp with or without
        # changes
        if codebases is not None:
            if not isinstance(codebases, dict):
                config.error("Codebases must be a dict of dicts")
            for codebase, codebase_attrs in codebases.iteritems():
                if not isinstance(codebase_attrs, dict):
                    config.error("Codebases must be a dict of dicts")
                if (codebases != BaseScheduler.DEFAULT_CODEBASES and
                        'repository' not in codebase_attrs):
                    config.error(
                        "The key 'repository' is mandatory in codebases")
        else:
            config.error("Codebases cannot be None")
        self.codebases = codebases
        # internal variables
        self._change_consumer = None
        self._change_consumption_lock = defer.DeferredLock()
# activity handling
    def activate(self):
        """Hook run when this instance becomes active; no-op in the base class."""
        return defer.succeed(None)
    def deactivate(self):
        """Hook run when losing active status; stops consuming change events."""
        return defer.maybeDeferred(self._stopConsumingChanges)
# service handling
    def _getServiceId(self):
        # look up (or create) this scheduler's id in the database, by name
        return self.master.data.updates.findSchedulerId(self.name)
    def _claimService(self):
        # try to record this master as the one running the scheduler
        return self.master.data.updates.trySetSchedulerMaster(self.serviceid,
                                                              self.master.masterid)
    def _unclaimService(self):
        # relinquish ownership by clearing the scheduler's master
        return self.master.data.updates.trySetSchedulerMaster(self.serviceid,
                                                              None)
# status queries
# deprecated: these aren't compatible with distributed schedulers
    def listBuilderNames(self):
        """Return the names of the builders this scheduler may trigger."""
        return self.builderNames
    def getPendingBuildTimes(self):
        """Return times of pending builds; none for the base class."""
        return []
# change handling
    @defer.inlineCallbacks
    def startConsumingChanges(self, fileIsImportant=None, change_filter=None,
                              onlyImportant=False):
        """Subscribe to change events from the data API.

        @param fileIsImportant: optional callable deciding whether a change
            is important
        @param change_filter: optional filter; changes it rejects are ignored
        @param onlyImportant: if true, unimportant changes are dropped rather
            than passed to gotChange
        """
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with the data API
        assert not self._change_consumer
        self._change_consumer = yield self.master.data.startConsuming(
            lambda k, m: self._changeCallback(k, m, fileIsImportant,
                                              change_filter, onlyImportant),
            {},
            ('changes',))
@defer.inlineCallbacks
def _changeCallback(self, key, msg, fileIsImportant, change_filter,
onlyImportant):
# ignore changes delivered while we're not running
if not self._change_consumer:
return
# get a change object, since the API requires it
chdict = yield self.master.db.changes.getChange(msg['changeid'])
change = yield changes.Change.fromChdict(self.master, chdict)
# filter it
if change_filter and not change_filter.filter_change(change):
return
if change.codebase not in self.codebases:
log.msg(format='change contains codebase %(codebase)s that is '
'not processed by scheduler %(name)s',
codebase=change.codebase, name=self.name)
return
if fileIsImportant:
try:
important = fileIsImportant(change)
if not important and onlyImportant:
return
except:
log.err(failure.Failure(),
'in fileIsImportant check for %s' % change)
return
else:
important = True
# use change_consumption_lock to ensure the service does not stop
# while this change is being processed
d = self._change_consumption_lock.run(
self.gotChange, change, important)
d.addErrback(log.err, 'while processing change')
    def _stopConsumingChanges(self):
        # (note: called automatically in deactivate)

        # acquire the change consumption lock to ensure that any change
        # consumption is complete before we are done stopping consumption
        def stop():
            if self._change_consumer:
                self._change_consumer.stopConsuming()
                self._change_consumer = None
        return self._change_consumption_lock.run(stop)
    def gotChange(self, change, important):
        """Process a new change; subclasses must override.

        @param change: the new Change instance
        @param important: whether the change was deemed important
        """
        raise NotImplementedError
# starting builds
    def addBuildsetForSourceStampsWithDefaults(self, reason, sourcestamps,
                                               waited_for=False, properties=None, builderNames=None,
                                               **kw):
        """Add a buildset, filling in sourcestamp defaults from self.codebases.

        Each passed sourcestamp dict (keyed by its 'codebase' entry) is
        overlaid on the configured defaults for that codebase before the
        buildset is created.
        """
        if sourcestamps is None:
            sourcestamps = []

        # convert sourcestamps to a dictionary keyed by codebase
        stampsByCodebase = {}
        for ss in sourcestamps:
            cb = ss['codebase']
            if cb in stampsByCodebase:
                raise RuntimeError("multiple sourcestamps with same codebase")
            stampsByCodebase[cb] = ss

        # Merge codebases with the passed list of sourcestamps
        # This results in a new sourcestamp for each codebase
        stampsWithDefaults = []
        for codebase in stampsByCodebase:
            ss = self.codebases.get(codebase, {}).copy()
            # apply info from passed sourcestamps onto the configured default
            # sourcestamp attributes for this codebase.
            ss.update(stampsByCodebase[codebase])
            stampsWithDefaults.append(ss)

        return self.addBuildsetForSourceStamps(sourcestamps=stampsWithDefaults,
                                               reason=reason, waited_for=waited_for, properties=properties,
                                               builderNames=builderNames,
                                               **kw)
def getCodebaseDict(self, codebase):
# Hook for subclasses to change codebase parameters when a codebase does
# not have a change associat |
CrowdStrike/mellon | mellon/interfaces.py | Python | mit | 4,243 | 0.007778 | from zope import component
from zope import interface
from zope.interface.interfaces import IObjectEvent
from zope import location
from sparc.configuration.container import ISparcPyContainerConfiguredApplication
#EVENTS
class ISnippetAvailableForSecretsSniffEvent(IObjectEvent):
    """Event fired when an object providing ISnippet is ready to be sniffed for secrets"""
#APPLICATION & FACTORIES
class IMellonApplication(ISparcPyContainerConfiguredApplication):
    """The configured Mellon application"""
class IMellonFileProvider(interface.Interface):
    """Provides IFile objects that should be processed by the application"""
    def __iter__():
        """Return an iterator of IFile objects"""
class IMellonFileProviderFactory(component.IFactory):
    """A factory producing an IMellonFileProvider"""
    def __call__(config):
        """Create a provider.

        Args:
            config; factory specific data structure holding required object
                initialization information needed by factory
        """
#SNIPPETS
class ISnippet(location.ILocation):
    """A snippet of data to be sniffed for secrets

    This also implements ILocation, where __parent__ is an IMellonFile and
    __name__ indicates where in the file the snippet can be located.
    """
    data = interface.Attribute("A Python data sequence")
class IBytesSnippet(ISnippet):
    """A snippet of bytes (binary) data to be sniffed for secrets"""
    data = interface.Attribute("A Python bytes sequence")
class IUnicodeSnippet(ISnippet):
    """A snippet of unicode (text) data to be sniffed for secrets"""
    data = interface.Attribute("A Python unicode sequence")
class ISnippetIterator(interface.Interface):
    """Iterates data snippets"""
    def __iter__():
        """Return an iterator of ISnippet objects"""
#FILES
class IPath(interface.Interface):
    """Marker for text that is a formatted file-system path"""
class IFile(interface.Interface):
    """Marker for a file-like object providing Python's file object interface"""
class IMellonFile(ISnippetIterator):
    """A file to be processed by the application"""
    def __str__():
        """String locatable identity of the file"""
class IUnicodeMellonFile(IMellonFile):
    """A Unicode (text) file to be processed by the application"""
    # Both attributes are measured in lines of text.
    snippet_lines_increment = \
        interface.Attribute("Number of lines to jump after each snippet, 0 "+
                            "indicates entire data.")
    snippet_lines_coverage = \
        interface.Attribute("Number of lines to include in each snippet "+
                            "if available, 0 indicates all remaining lines.")
class IByteMellonFile(IMellonFile):
    """A byte (binary) file to be processed by the application"""
    # Increment/coverage are measured in read_size-sized data packets.
    read_size = interface.Attribute(\
        "Max number of bytes to include in each file read operation."+
        "Number of bytes to jump after each snippet, 0 indicates entire data.")
    snippet_bytes_increment = \
        interface.Attribute("Number of read_size data packets to jump after "+
                            "snippet return.")
    snippet_bytes_coverage = \
        interface.Attribute("Number of read_size data packets to include in "+
                            "each snippet. 0 indicates all data packets.")
class IBinaryChecker(interface.Interface):
    """Binary file checker"""
    def check():
        """True indicates the data was found to be binary"""
# SNIFFERS, SECRETS, WHITELISTS
class ISecretSniffer(interface.Interface):
    """Looks for a secret"""
    def __iter__():
        """Return an iterator of found ISecret providers"""
class ISecret(location.ILocation):
    """A secret found within an ISnippet

    This also implements ILocation, where __parent__ is an ISnippet and
    __name__ is an alias for __str__.
    """
    def __str__():
        """String details of the secret and/or how it was found"""
    def __hash__():
        """Uniquely identifies the locatable secret among other secrets"""
class IWhitelistInfo(interface.Interface):
    """Object whitelist information"""
    def __str__():
        """Detailed information on how the object was whitelisted"""
class IWhitelist(interface.Interface):
    """Identifies if an object is whitelisted"""
    def __iter__():
        """Return an iterator of found IWhitelistInfo providers"""
|
PGer/incubator-hawq | tools/bin/gppylib/parseutils.py | Python | apache-2.0 | 22,013 | 0.005497 | #!/usr/bin/env python
# Line too long - pylint: disable=C0301
# Invalid name - pylint: disable=C0103
"""
parseutils.py
Routines to parse "flexible" configuration files for tools like
gpaddmirrors, gprecoverseg, gpexpand, etc.
Copyright (c) EMC/Greenplum Inc 2011. All Rights Reserved.
"""
import sys
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from gppylib.gplog import get_default_logger, logging_is_verbose
logger = get_default_logger()
def caller():
    "Return name of calling function (empty unless verbose logging is on)"
    # sys._getframe(1) is the caller's frame; the lookup is skipped in
    # non-verbose mode to avoid the introspection cost.
    if logging_is_verbose():
        return sys._getframe(1).f_code.co_name + '()'
    return ''
def canonicalize_address(addr):
    """Encase an address in [ ] per RFC 2732 when necessary.

    Addresses containing ':' (e.g. IPv6 literals) are wrapped in square
    brackets so the colons cannot be mistaken for field delimiters.
    Addresses with no ':' are returned unchanged, as are addresses already
    containing a '[' (assumed to be escaped by the caller).

    Examples
    --------
    >>> canonicalize_address('myhost')
    'myhost'
    >>> canonicalize_address('::1')
    '[::1]'
    >>> canonicalize_address('[::1]')
    '[::1]'
    >>> canonicalize_address('2620:0:170:610::13')
    '[2620:0:170:610::13]'

    @param addr: the address to possibly encase in [ ]
    @returns: the address, encased in [] if necessary
    """
    needs_brackets = ':' in addr and '[' not in addr
    return '[' + addr + ']' if needs_brackets else addr
#
# line parsing
#
def consume_to(delimiter, rest):
    """Discard characters of *rest* up to and including *delimiter*.

    Returns a (None, after, error) triple: on success `after` holds the
    characters following the first delimiter and `error` is None; when the
    delimiter is absent, `after` is the unchanged input and `error`
    explains why.

    >>> consume_to('=', 'abc=def:ghi')
    (None, 'def:ghi', None)

    @param delimiter: the delimiter string
    @param rest: the string to read such as 'abc:def:ghi'
    @returns: (None, after, None) tuple such as (None, 'def:ghi', None)
    """
    pos = rest.find(delimiter)
    if pos == -1:
        return None, rest, 'does not contain ' + delimiter
    return None, rest[pos + 1:], None
def read_to(delimiter, rest):
    """Split *rest* at the first occurrence of *delimiter*.

    Returns (before, after, None).  When the delimiter is absent the whole
    input becomes `before` and `after` is None; an empty input yields
    ('', None, None).  The third element is always None, kept for
    signature compatibility with the other readers (which use it for an
    error message).

    >>> read_to(':', 'abc:def:ghi')
    ('abc', 'def:ghi', None)
    >>> read_to(':', 'abc')
    ('abc', None, None)

    @param delimiter: the delimiter string
    @param rest: the string to read such as 'abc:def:ghi'
    @returns: (before, after, None) tuple such as ('abc', 'def:ghi', None)
    """
    pos = rest.find(delimiter)
    if pos == -1:
        return rest, None, None
    return rest[:pos], rest[pos + 1:], None
def read_to_bracketed(delimiter, rest):
    """Read a leading '[...]' group from *rest*.

    On success returns (inner, after, None) where `inner` is the text
    between '[' and ']' and `after` is the text following ']' plus the
    delimiter (None when ']' ends the input).  Malformed input is reported
    by returning (None, rest, reason) instead of raising, since the caller
    can report the error with better context:

      * input does not start with '['
      * no closing ']'
      * the character right after ']' is not the delimiter

    >>> read_to_bracketed(':', '[abc:def]:ghi')
    ('abc:def', 'ghi', None)
    >>> read_to_bracketed(':', '[abc:def]')
    ('abc:def', None, None)

    @param delimiter: the delimiter string
    @param rest: the string to read such as '[abc:def]:ghi'
    @returns: (before, after, reason) tuple such as ('abc:def', 'ghi', None)
    """
    if rest[:1] != '[':
        return None, rest, 'does not begin with ['
    close = rest.find(']')
    if close == -1:
        return None, rest, 'does not contain ending ]'
    inner = rest[1:close]
    tail = rest[close + 1:]
    if not tail:
        return inner, None, None
    if tail[0] != delimiter:
        return None, rest, 'characters not allowed after ending ]'
    return inner, tail[1:], None
def read_to_possibly_bracketed(delimiter, rest):
    """Read the next field, bracketed or plain.

    Dispatches to read_to_bracketed when *rest* starts with '[' (IPv6
    literal form) and to read_to otherwise (hostname or IPv4), so a field
    may hold any kind of address.

    >>> read_to_possibly_bracketed(':', 'abc:def:ghi')
    ('abc', 'def:ghi', None)
    >>> read_to_possibly_bracketed(':', '[abc:def]:ghi')
    ('abc:def', 'ghi', None)

    @param delimiter: the delimiter string
    @param rest: the string to read such as '[abc:def]:ghi'
    @returns: (before, after, reason) tuple such as ('abc:def', 'ghi', None)
    """
    reader = read_to_bracketed if rest[:1] == '[' else read_to
    return reader(delimiter, rest)
class LineParser:
"""
Manage state to parse a single line, generally from a configuration
file with fields delimited by colons.
"""
    def __init__(self, caller, filename, lineno, line):
        "Initialize"
        # `rest` tracks the unparsed remainder of `line`; `error` records
        # the first parse failure, if any.
        (self.caller, self.filename, self.lineno, self.line, self.rest, self.error) = (caller, filename, lineno, line, line, None)
        self.logger = logger
        if logging_is_verbose():
            self.logger.debug("%s:%s" % (filename, lineno))
    def ensure_more_to_process(self, name):
        "Raise an exception if we've exhausted the input line"
        # `name` describes the field being read, for the error message.
        if self.rest is None:
            msg = "out of values (reading %s)" % name
            raise ExceptionNoStackTraceNeeded("%s:%s:%s LINE >>%s\n%s" % (self.filename, self.lineno, self.caller, self.line, msg))
def read_delimited_field(self, delimiter, name="next field", reader=read_to):
"""
Attempts to read the next field in the line up to the specified delimiter
using the specified reading method, raising any error encountered as an
exception. Returns the read field when successful.
"""
self. |
yieldbot/singularity-py | singularity/commands/cmd_request.py | Python | mit | 10,225 | 0.004988 | import click
from json import dumps, load
import os
import sys
from tabulate import tabulate
@click.group('request')
@click.pass_context
def cli(ctx):
    # Parent group for all `request` subcommands; the shared Singularity
    # client is expected in ctx.obj['client'].
    pass
@cli.command(name='unpause', help='Unpause a request')
@click.argument('request-id')
@click.pass_context
def request_unpause(ctx, request_id):
    """Resume a paused request, echoing the outcome."""
    res = ctx.obj['client'].unpause_request(request_id)
    if 'error' in res:
        click.echo('error during unpause request {0}: {1}'.format(request_id, res['error']))
    else:
        click.echo('unpaused request {0}'.format(request_id))
@cli.command(name='run', help='Run a on-demand request now')
@click.argument('request-id')
@click.pass_context
def request_run(ctx, request_id):
    """Trigger an on-demand request immediately, echoing the outcome."""
    res = ctx.obj['client'].run_request(request_id)
    if 'error' in res:
        click.echo('error during running request {0}: {1}'.format(request_id, res['error']))
    else:
        click.echo('running request {0}'.format(request_id))
@cli.command(name='pause', help='Pause a request')
@click.option('--kill-tasks', '-k', is_flag=True, default=False, help='Kill tasks when paused')
@click.argument('request-id')
@click.pass_context
def request_pause(ctx, request_id, kill_tasks):
    """Pause a request, optionally killing its running tasks."""
    res = ctx.obj['client'].pause_request(request_id, kill_tasks)
    if 'error' in res:
        click.echo('error during pause request {0}: {1}'.format(request_id, res['error']))
    else:
        click.echo('paused request {0} with killTasks={1}'.format(request_id, kill_tasks))
@cli.command(name='scale', help='Scale a request up/down')
@click.argument('request-id')
# Fix: click.Argument accepts exactly one parameter declaration; the extra
# '-i' flag previously passed here makes click raise at import time
# (option flags belong to click.option, not click.argument).
@click.argument('instances', type=click.INT)
@click.pass_context
def request_scale(ctx, request_id, instances):
    """Set the number of instances for a request, echoing the outcome."""
    res = ctx.obj['client'].scale_request(request_id, instances)
    if 'error' in res:
        click.echo('error during set instances for request {0}: {1}'.format(request_id, res['error']))
    else:
        click.echo('setting instances to {0} for request {1}'.format(instances, request_id))
@cli.command(name='bounce', help='Restart a request tasks')
@click.argument('request-id')
@click.pass_context
def request_bounce(ctx, request_id):
    """Restart the tasks of a request, echoing the outcome."""
    res = ctx.obj['client'].bounce_request(request_id)
    if 'error' in res:
        click.echo('error during set instances for request {0}: {1}'.format(request_id, res['error']))
    else:
        click.echo('bounced request {0}'.format(request_id))
@cli.command(name='get', help='Get the state of a request')
@click.argument('request-id')
@click.option('--json', '-j', is_flag=True, help='Enable json output')
@click.pass_context
def request_get(ctx, request_id, json):
    """Show a single request, either as a table or as raw JSON."""
    if request_id:
        res = ctx.obj['client'].get_request(request_id)
        if json:
            click.echo(dumps(res, indent=2))
        else:
            output_request(res)
@cli.command(name='delete', help='Remove a request')
@click.argument('request-id')
@click.pass_context
def request_delete(ctx, request_id):
    """Delete a request, echoing the outcome."""
    res = ctx.obj['client'].delete_request(request_id)
    if 'error' in res:
        click.echo('error during delete request {0}: {1}'.format(request_id, res['error']))
    else:
        click.echo('deleted request {0}'.format(request_id))
@cli.command(name='list', help='Get a list of requests')
@click.option('--type', '-t', default='all', type=click.Choice(['pending', 'cleanup', 'paused', 'finished', 'cooldown', 'active', 'all']), help='Request type to get')
@click.option('--json', '-j', is_flag=True, help='Enable json output')
@click.pass_context
def request_list(ctx, type, json):
    """List requests, optionally filtered by state."""
    res = ctx.obj['client'].get_requests(type)
    if json:
        click.echo(dumps(res, indent=2))
    else:
        output_requests(res)
@cli.command(name='sync', help='Sync one or more requests/deploys')
@click.option('--file', '-f', type=click.File('r'), help='JSON request/deploy file to sync')
@click.option('--dir', '-d', type=click.Path(), help='Directory of JSON request/deploy files to sync')
@click.pass_context
def request_sync(ctx, file, dir):
    """Sync requests/deploys from one JSON file or every *.json in a dir.

    JSON parse errors are reported per file and make the command exit
    with status 1 after processing everything it can.
    """
    had_error = False
    client = ctx.obj['client']
    if file:
        file_request = None
        try:
            file_request = load(file)
        except ValueError as e:
            click.echo('json parse error: {0} in {1}'.format(e, file.name))
            had_error = True
        if file_request:
            sync_request(client, file_request)
    elif dir:
        for filename in os.listdir(dir):
            if filename.endswith('json'):
                with open(os.path.join(dir, filename)) as file:
                    file_request = None
                    try:
                        file_request = load(file)
                    except ValueError as e:
                        click.echo('json parse error: {0} in {1}'.format(e, filename))
                        had_error = True
                    if file_request:
                        sync_request(client, file_request)
    else:
        click.echo('Either --file or --dir is required')
    # exit non-zero if any file failed to parse
    if had_error:
        sys.exit(1)
def sync_request(client, request):
    """Bring one Singularity request in line with its JSON definition.

    `request` is the parsed JSON: {'request': {...}, 'deploy': {...}}.
    Zero requested instances means "pause the request"; otherwise the
    request is upserted and its deploy synced when the deploy id differs
    from the currently active one.
    """
    requested_instances = request['request'].get('instances', 1)
    if requested_instances == 0:
        singularity_request = client.pause_request(request['request']['id'], kill_tasks=True)
        if 'error' in singularity_request:
            click.echo('error during sync request: {0}'.format(singularity_request['error']))
        else:
            click.echo('syncronized request {0}'.format(request['request']['id']))
    else:
        isPaused = False
        singularity_request = client.get_request(request['request']['id'])
        if 'error' in singularity_request and singularity_request['status_code'] == 404:
            pass # request didn't exist before
        else:
            # a paused request with instances requested is unpaused now,
            # unless the deploy asks to stay paused until after the deploy
            if singularity_request and singularity_request['state'] == 'PAUSED' and requested_instances > 0:
                isPaused = True
                if not request.get('deploy', {}).get('pauseBeforeDeploy', False):
                    client.unpause_request(request['request']['id'])
                    isPaused = False
        singularity_request = client.upsert_request(request['request'])
        if 'error' in singularity_request:
            click.echo('error during sync request: {0}'.format(singularity_request['error']))
        else:
            click.echo('syncronized request {0}'.format(request['request']['id']))
        if 'deploy' in request:
            file_deploy_id = request['deploy'].get('id', None)
            # always set deploy.requestId to request.id from json file
            request['deploy']['requestId'] = request['request']['id']
            if 'activeDeploy' in singularity_request:
                singularity_deploy_id = singularity_request['activeDeploy'].get('id', None)
                # only redeploy when the file's deploy id differs
                if file_deploy_id != singularity_deploy_id:
                    sync_deploy(client, request['deploy'], isPaused)
            else:
                sync_deploy(client, request['deploy'], isPaused)
def sync_deploy(client, deploy, isPaused):
    """Create a deploy for an already-synced request.

    Honors the non-API 'pauseBeforeDeploy' flag: when set (truthy), the flag
    is stripped from the payload, the request is paused first (if it is not
    already), and Singularity is asked to unpause on a successful deploy.
    Returns the raw API response dict.
    """
    unpause_on_success = bool(deploy.get('pauseBeforeDeploy', False))
    if unpause_on_success:
        # Remove the marker key so it is not sent to the Singularity API.
        del deploy['pauseBeforeDeploy']
        if not isPaused:
            pause_deploy_and_wait(client, deploy)

    response = client.create_deploy(deploy, unpause_on_success)
    if 'error' in response:
        click.echo('error during sync deploy: {0}'.format(response['error']))
    else:
        click.echo('syncronized deploy {0} for request {1}'.format(deploy['id'], deploy['requestId']))
    return response
def pause_deploy_and_wait(client, deploy):
singularity_request = client.pause_request(deploy['requestId'], kill_tasks=True)
if 'error' in singularity_request:
click.echo('error during pause request: {0}'.format(singularity_request['error']))
else:
click.echo('pausing request {0} before deploy'.format(deploy['requestId']))
if singularity_request:
active_deploy_id = singularity_request.get('activeDeploy', {}).get('id', None)
if active_deploy_id:
no_tasks = False
while not no_tasks:
tasks = client.get_active_deploy_tasks(deploy['requestId'], active_deploy_id)
if len(tasks) == 0:
|
class Solution(object):
    """LeetCode 72 -- Edit Distance (Levenshtein distance via dynamic programming)."""

    def minDistance(self, word1, word2):
        """Return the minimum number of single-character edits (insertions,
        deletions, substitutions) required to turn word1 into word2.

        :type word1: str
        :type word2: str
        :rtype: int
        """
        rows = len(word1) + 1
        cols = len(word2) + 1
        # dp[i][j] = edit distance between word1[:i] and word2[:j].
        dp = [[0] * cols for _ in range(rows)]
        for j in range(cols):
            dp[0][j] = j  # build word2[:j] from "" with j insertions
        for i in range(rows):
            dp[i][0] = i  # erase word1[:i] with i deletions
        for i in range(1, rows):
            for j in range(1, cols):
                if word1[i - 1] == word2[j - 1]:
                    # Matching tail characters cost nothing extra.
                    dp[i][j] = dp[i - 1][j - 1]
                else:
                    dp[i][j] = 1 + min(dp[i - 1][j - 1],  # substitute
                                       dp[i - 1][j],      # delete
                                       dp[i][j - 1])      # insert
        return dp[rows - 1][cols - 1]
|
joergkappes/opengm | src/interfaces/python/opengm/__init__.py | Python | mit | 10,979 | 0.027689 | #from opengmcore import _opengmcore.adder as adder
from opengmcore import *
from __version__ import version
from functionhelper import *
from _inf_param import _MetaInfParam , InfParam
from _visu import visualizeGm
from _misc import defaultAccumulator
from __version__ import version
import time
from _inference_interface_generator import _inject_interface , InferenceBase
import inference
import hdf5
import benchmark
from _to_native_converter import to_native_boost_python_enum_converter
# initialize solver/inference dictionaries.
# Each tuple: (solver namespace __dict__, operator name, accumulator name).
_solverDicts=[
   (inference.adder.minimizer.solver.__dict__ , 'adder', 'minimizer' ),
   (inference.adder.maximizer.solver.__dict__, 'adder', 'maximizer' ),
   (inference.multiplier.integrator.solver.__dict__,'adder', 'integrator'),  # TODO: should this be adder instead of multiplier?
   (inference.adder.logsumexp.solver.__dict__, 'adder', 'logsumexp'),
   (inference.multiplier.minimizer.solver.__dict__, 'multiplier', 'minimizer' ),
   (inference.multiplier.maximizer.solver.__dict__, 'multiplier', 'maximizer' ),
   (inference.multiplier.integrator.solver.__dict__,'multiplier', 'integrator')
]
# Inject the generated high-level inference classes into the `inference`
# module namespace under their public names.
# (Fixes corrupted token: the assignment target value was split as "infClas | s".)
for infClass,infName in _inject_interface(_solverDicts):
  inference.__dict__[infName]=infClass
class Timer:
    """Context manager measuring the wall-clock time of its `with` block.

    The elapsed seconds are stored in ``self.elapsed`` and, when `verbose`
    is True, printed when the block exits.
    """
    def __init__(self, name=None , verbose = True):
        # name: optional label printed when the block is entered
        # verbose: when True, print the label and elapsed time
        self.name = name
        self.verbose = verbose

    def __enter__(self):
        if self.name and self.verbose:
            print '[%s]' % self.name
        # start the wall-clock measurement
        self.tstart = time.time()
        return self

    def __exit__(self, type, value, traceback):
        #if self.name:
        #    print '[%s]' % self.name,
        # store elapsed seconds so callers can read it after the block
        self.elapsed = time.time() - self.tstart
        if self.verbose:
            print ' Elapsed: %s' % (time.time() - self.tstart)
def weightRandomizer(noiseType='normalAdd', noiseParam=1.0, seed=42, ignoreSeed=True):
    """Build a weight-randomizer parameter object for learning solvers.

    :param noiseType: one of 'none'/'noNoise', 'normalAdd', 'normalMult',
        'uniformAdd'; raises RuntimeError for anything else
    :param noiseParam: magnitude of the noise
    :param seed: RNG seed
    :param ignoreSeed: when True, the seed is ignored by the randomizer
    """
    solverNs = inference.adder.minimizer.solver
    param = solverNs._WeightRandomizerParameter_()
    noiseEnum = solverNs._WeightRandomization_NoiseType_
    # Map the user-facing noise-type name onto the native enum value.
    nameToEnum = {
        'none': noiseEnum.none,
        'noNoise': noiseEnum.none,
        'normalAdd': noiseEnum.normalAdd,
        'normalMult': noiseEnum.normalMult,
        'uniformAdd': noiseEnum.uniformAdd,
    }
    try:
        param.noiseType = nameToEnum[noiseType]
    except KeyError:
        raise RuntimeError("unknown noise type")
    param.noiseParam = float(noiseParam)
    param.seed = int(seed)
    param.ignoreSeed = bool(ignoreSeed)
    return param
def saveGm(gm, f, d='gm'):
    """Save a graphical model to a hdf5 file.

    Args:
        gm : graphical model to save
        f : filepath of the hdf5 file
        d : name of the hdf5 dataset (default: 'gm')
    """
    hdf5.saveGraphicalModel(gm, f, d)
def loadGm(f, d='gm', operator='adder'):
    """Load a graphical model from a hdf5 file.

    Args:
        f : filepath of the hdf5 file
        d : name of the hdf5 dataset (default: 'gm')
        operator : operator of the graphical model ('adder' / 'multiplier')

    Raises:
        RuntimeError: for an unknown operator name.
    """
    # Dispatch on the semiring operator to pick the model class.
    modelClasses = {
        'adder': adder.GraphicalModel,
        'multiplier': multiplier.GraphicalModel,
    }
    if operator not in modelClasses:
        raise RuntimeError("unknown operator: "+ operator)
    gm = modelClasses[operator]()
    hdf5.loadGraphicalModel(gm, f, d)
    return gm
class TestModels(object):
    """Factory for small random graphical models used in tests/benchmarks.

    NOTE(review): the numpy RNG is seeded *after* the unaries are drawn, so
    only the higher-order factors are reproducible across calls — presumably
    intentional, but worth confirming.
    """
    @staticmethod
    def chain3(nVar,nLabels):
        """Chain model with random unaries and random 3rd-order factors."""
        model=adder.GraphicalModel([nLabels]*nVar)
        unaries = numpy.random.rand(nVar,nLabels)
        model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
        numpy.random.seed(42)
        for x0 in range(nVar-2):
            f=numpy.random.rand(nLabels,nLabels,nLabels)
            model.addFactor(model.addFunction(f),[x0,x0+1,x0+2])
        return model

    @staticmethod
    def chain4(nVar,nLabels):
        """Chain model with random unaries and random 4th-order factors."""
        model=adder.GraphicalModel([nLabels]*nVar)
        unaries = numpy.random.rand(nVar,nLabels)
        model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
        numpy.random.seed(42)
        for x0 in range(nVar-3):
            f=numpy.random.rand(nLabels,nLabels,nLabels,nLabels)
            model.addFactor(model.addFunction(f),[x0,x0+1,x0+2,x0+3])
        return model

    @staticmethod
    def chainN(nVar,nLabels,order,nSpecialUnaries=0,beta=1.0):
        """Chain model of arbitrary factor order.

        nSpecialUnaries unary entries are zeroed at random positions, and
        all higher-order factor values are scaled by beta.
        """
        model=adder.GraphicalModel([nLabels]*nVar)
        unaries = numpy.random.rand(nVar,nLabels)
        for sn in range(nSpecialUnaries):
            r=int(numpy.random.rand(1)*nVar-1)
            rl=int(numpy.random.rand(1)*nLabels-1)
            unaries[r,rl]=0.0
        model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
        numpy.random.seed(42)
        for x0 in range(nVar-(order-1)):
            f=numpy.random.rand( *([nLabels]*order))
            f*=beta
            vis=numpy.arange(order)
            vis+=x0
            model.addFactor(model.addFunction(f),vis)
        return model

    @staticmethod
    def secondOrderGrid(dx,dy,nLabels):
        """dx-by-dy grid model with random unaries and pairwise factors.

        NOTE(review): the pairwise functions are built with shape
        (nF2, nLabels); confirm addFunctions expects this layout.
        """
        nVar=dx*dy
        model=adder.GraphicalModel([nLabels]*nVar)
        unaries = numpy.random.rand(nVar,nLabels)
        model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
        vis2Order=secondOrderGridVis(dx,dy,True)
        nF2=len(vis2Order)#.shape[0]
        f2s=numpy.random.rand(nF2,nLabels)
        model.addFactors(model.addFunctions(f2s),vis2Order)
        return model
class GenericTimingVisitor(object):
    """Inference visitor that records value, bound and runtime per visit.

    Every `visitNth` iteration the current value/bound are sampled and the
    elapsed time since the previous sample is stored; the timer is restarted
    after each sample so recorded runtimes exclude the visitor's own overhead.
    """
    def __init__(self,visitNth=1,reserve=0,verbose=True,multiline=True):
        # visitNth: sample every n-th call to visit()
        # reserve/multiline: kept for interface compatibility (unused here)
        self.visitNth=visitNth
        self.reserve=reserve
        self.verbose=verbose
        self.multiline=multiline
        self.values_ = None
        self.runtimes_ = None
        self.bounds_ = None
        self.iterations_ = None
        self.t0 = None
        self.t1 = None
        self.iterNr = 0

    def getValues(self):
        """Recorded objective values as a numpy array."""
        return numpy.require(self.values_,dtype=value_type)

    def getTimes(self):
        """Recorded per-sample runtimes (seconds) as a numpy array."""
        return numpy.require(self.runtimes_,dtype=value_type)

    def getBounds(self):
        """Recorded bounds as a numpy array."""
        return numpy.require(self.bounds_,dtype=value_type)

    def getIterations(self):
        """Iteration numbers at which samples were taken."""
        return numpy.require(self.iterations_,dtype=value_type)

    def begin(self,inf):
        """Called once before inference starts; records the initial state."""
        v = inf.value()
        b = inf.bound()
        self.values_ =[v]
        self.bounds_ =[b]
        self.runtimes_ =[0.0]
        self.iterations_=[self.iterNr]
        if self.verbose :
            print 'Begin : %d Value : %f Bound : %f '%(self.iterNr,v,b)
        # start the timing
        self.t0 =time.time()
        self.t1 =time.time()

    def visit(self,inf):
        """Called by the solver each iteration; samples every visitNth call."""
        if(self.iterNr==0 or self.iterNr%self.visitNth==0):
            # "stop the timing"
            self.t1=time.time()
            # get the runtime of the run
            rt=self.t1-self.t0

            v = inf.value()
            b = inf.bound()
            if self.verbose :
                print 'Step : %d Value : %f Bound : %f '%(self.iterNr,v,b)

            # store results
            self.values_.append(v)
            self.bounds_.append(b)
            self.runtimes_.append(rt)
            self.iterations_.append(self.iterNr)

            # increment iteration number
            self.iterNr+=1

            # restart the timing
            self.t0=time.time()
        else:
            # increment iteration number
            self.iterNr+=1

    def end(self,inf):
        """Called once after inference finishes; records the final state."""
        # "stop the timing"
        self.t1=time.time()
        # get the runtime of the run
        rt=self.t1-self.t0

        v = inf.value()
        b = inf.bound()
        if self.verbose :
            print 'End : %d Value : %f Bound : %f '%(self.iterNr,v,b)

        # store results
        self.values_.append(v)
        self.bounds_.append(b)
        self.runtimes_.append(rt)
        self.iterations_.append(self.iterNr)
class __RandomFusion__(object):
def __init__(self,gm,accumulator=None,parameter=InfParam()):
if accumulator is None:
self.accumulator=defaultAccumulator(gm=gm)
else:
self.accumulator=accumulator
kwargs=parameter.kwargs
self.gm_=gm
self.steps = kwargs.get('steps', 100)
self.fusionSolver = kwargs.get('fuisionSolver', 'lf2')
self.arg_ = None
self.value_ = None
self.fusionMover=inference.adder.minimizer.FusionMover(self.gm_)
self.nLabels = self.gm_.numberOfLabels(0)
self.nVar = self.gm_.numberOfVariab |
Titan-C/scikit-learn | sklearn/feature_selection/tests/test_mutual_info.py | Python | bsd-3-clause | 6,881 | 0 | from __future__ import division
import numpy as np
from numpy.testing import run_module_suite
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_array_equal, assert_almost_equal,
assert_false, assert_raises, assert_equal,
assert_allclose, assert_greater)
from sklearn.feature_selection.mutual_info_ import (
mutual_info_regression, mutual_info_classif, _compute_mi)
def test_compute_mi_dd():
    """MI of two discrete vectors matches the hand-computed value."""
    # Both marginals are (3/5, 2/5); the joint cells are (1/5, 2/5, 2/5).
    a = np.array([0, 1, 1, 0, 0])
    b = np.array([1, 0, 0, 0, 1])

    h_marginal = -(3 / 5) * np.log(3 / 5) - (2 / 5) * np.log(2 / 5)
    h_joint = -(1 / 5) * np.log(1 / 5) - 2 * (2 / 5) * np.log(2 / 5)
    # I(X; Y) = H(X) + H(Y) - H(X, Y), with H(X) == H(Y) here.
    expected = 2 * h_marginal - h_joint

    assert_almost_equal(_compute_mi(a, b, True, True), expected)
def test_compute_mi_cc():
    """Continuous-continuous MI estimate matches bivariate-normal theory."""
    # For two continuous variables a good approach is to test on bivariate
    # normal distribution, where mutual information is known.

    # Mean of the distribution, irrelevant for mutual information.
    mean = np.zeros(2)

    # Setup covariance matrix with correlation coeff. equal 0.5.
    sigma_1 = 1
    sigma_2 = 10
    corr = 0.5
    cov = np.array([
        [sigma_1**2, corr * sigma_1 * sigma_2],
        [corr * sigma_1 * sigma_2, sigma_2**2]
    ])

    # True theoretical mutual information:
    # I = log(sigma_1) + log(sigma_2) - 0.5 * log(det(cov)).
    I_theory = (np.log(sigma_1) + np.log(sigma_2) -
                0.5 * np.log(np.linalg.det(cov)))

    np.random.seed(0)
    Z = np.random.multivariate_normal(mean, cov, size=1000)

    x, y = Z[:, 0], Z[:, 1]

    # Theory and computed values won't be very close, assert that the
    # first figures after decimal point match.
    for n_neighbors in [3, 5, 7]:
        I_computed = _compute_mi(x, y, False, False, n_neighbors)
        assert_almost_equal(I_computed, I_theory, 1)
def test_compute_mi_cd():
    """Continuous-discrete MI estimate matches the analytic mixture value."""
    # To test define a joint distribution as follows:
    # p(x, y) = p(x) p(y | x)
    # X ~ Bernoulli(p)
    # (Y | x = 0) ~ Uniform(-1, 1)
    # (Y | x = 1) ~ Uniform(0, 2)

    # Use the following formula for mutual information:
    # I(X; Y) = H(Y) - H(Y | X)

    # Two entropies can be computed by hand:
    # H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2)
    # H(Y | X) = ln(2)

    # Now we need to implement sampling from our distribution, which is
    # done easily using conditional distribution logic.

    n_samples = 1000
    np.random.seed(0)

    for p in [0.3, 0.5, 0.7]:
        x = np.random.uniform(size=n_samples) > p

        y = np.empty(n_samples)
        mask = x == 0
        y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
        y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))

        I_theory = -0.5 * ((1 - p) * np.log(0.5 * (1 - p)) +
                           p * np.log(0.5 * p) + np.log(0.5)) - np.log(2)

        # Assert the same tolerance (one decimal place) for all k.
        for n_neighbors in [3, 5, 7]:
            I_computed = _compute_mi(x, y, True, False, n_neighbors)
            assert_almost_equal(I_computed, I_theory, 1)
def test_compute_mi_cd_unique_label():
    """Appending one sample with brand-new labels must not change the MI."""
    n = 100
    labels = np.random.uniform(size=n) > 0.5

    values = np.empty(n)
    zero_mask = labels == 0
    values[zero_mask] = np.random.uniform(-1, 1, size=np.sum(zero_mask))
    values[~zero_mask] = np.random.uniform(0, 2, size=np.sum(~zero_mask))

    mi_before = _compute_mi(labels, values, True, False)

    # Add a single point with a label (2) and value (10) unseen so far.
    labels = np.hstack((labels, 2))
    values = np.hstack((values, 10))
    mi_after = _compute_mi(labels, values, True, False)

    assert_equal(mi_before, mi_after)
# We are going to test that feature ordering by MI matches our expectations.
def test_mutual_info_classif_discrete():
    """Discrete features must be ranked 0 > 2 > 1 by MI with the target."""
    # (Fixes corrupted token: the def keyword was split as "d | ef".)
    X = np.array([[0, 0, 0],
                  [1, 1, 0],
                  [2, 0, 1],
                  [2, 0, 1],
                  [2, 0, 1]])
    y = np.array([0, 1, 2, 2, 1])

    # Here X[:, 0] is the most informative feature, and X[:, 1] is weakly
    # informative.
    mi = mutual_info_classif(X, y, discrete_features=True)
    assert_array_equal(np.argsort(-mi), np.array([0, 2, 1]))
def test_mutual_info_regression():
    """Feature ranking for a correlated multivariate-normal target.

    (Fixes corrupted token: the function name was split as
    "test_mutua | l_info_regression".)
    """
    # We generate sample from multivariate normal distribution, using
    # transformation from initially uncorrelated variables. The zero
    # variables after transformation is selected as the target vector,
    # it has the strongest correlation with the variable 2, and
    # the weakest correlation with the variable 1.
    T = np.array([
        [1, 0.5, 2, 1],
        [0, 1, 0.1, 0.0],
        [0, 0.1, 1, 0.1],
        [0, 0.1, 0.1, 1]
    ])
    cov = T.dot(T.T)
    mean = np.zeros(4)

    np.random.seed(0)
    Z = np.random.multivariate_normal(mean, cov, size=1000)
    X = Z[:, 1:]
    y = Z[:, 0]

    mi = mutual_info_regression(X, y, random_state=0)
    assert_array_equal(np.argsort(-mi), np.array([1, 2, 0]))
def test_mutual_info_classif_mixed():
    """Mixed continuous/discrete features: ranking and n_neighbors effects."""
    # Here the target is discrete and there are two continuous and one
    # discrete feature. The idea of this test is clear from the code.
    np.random.seed(0)
    X = np.random.rand(1000, 3)
    X[:, 1] += X[:, 0]
    y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int)
    X[:, 2] = X[:, 2] > 0.5

    mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3,
                             random_state=0)
    assert_array_equal(np.argsort(-mi), [2, 0, 1])
    for n_neighbors in [5, 7, 9]:
        mi_nn = mutual_info_classif(X, y, discrete_features=[2],
                                    n_neighbors=n_neighbors, random_state=0)
        # Check that the continuous values have an higher MI with greater
        # n_neighbors
        assert_greater(mi_nn[0], mi[0])
        assert_greater(mi_nn[1], mi[1])
        # The n_neighbors should not have any effect on the discrete value
        # The MI should be the same
        assert_equal(mi_nn[2], mi[2])
def test_mutual_info_options():
    """Both estimators reject sparse input with discrete_features=False and
    treat discrete_features='auto' consistently with the explicit settings."""
    X = np.array([[0, 0, 0],
                  [1, 1, 0],
                  [2, 0, 1],
                  [2, 0, 1],
                  [2, 0, 1]], dtype=float)
    y = np.array([0, 1, 2, 2, 1], dtype=float)
    X_csr = csr_matrix(X)

    for mutual_info in (mutual_info_regression, mutual_info_classif):
        # BUG FIX: exercise the estimator under test (the loop variable),
        # not always mutual_info_regression.
        assert_raises(ValueError, mutual_info, X_csr, y,
                      discrete_features=False)

        mi_1 = mutual_info(X, y, discrete_features='auto', random_state=0)
        mi_2 = mutual_info(X, y, discrete_features=False, random_state=0)

        mi_3 = mutual_info(X_csr, y, discrete_features='auto',
                           random_state=0)
        mi_4 = mutual_info(X_csr, y, discrete_features=True,
                           random_state=0)

        assert_array_equal(mi_1, mi_2)
        assert_array_equal(mi_3, mi_4)

        assert_false(np.allclose(mi_1, mi_3))
# Allow running this test module directly (python test_mutual_info.py).
if __name__ == '__main__':
    run_module_suite()
|
crossbario/crossbarexamples | xbr/teststack1/_init/set_xbrmm_key.py | Python | apache-2.0 | 521 | 0.007678 | import sys
import binascii
from init_data import MARKETS
# Write the XBR market maker's raw private key to the file given on the
# command line. (Fixes corrupted token: MARKETS was split as "MARK | ETS".)
xbrmm_key_file = sys.argv[1]

# Market maker address and hex-encoded private key come from the first
# market definition in init_data.
addr = MARKETS[0]['maker']
pkey = MARKETS[0]['maker_pkey']

print(xbrmm_key_file)

with open(xbrmm_key_file, 'wb') as f:
    # Strip the leading '0x' and decode the hex string to raw bytes.
    raw_key = binascii.a2b_hex(pkey[2:])
    f.write(raw_key)

print('XBR market maker with address 0x{}: private key written to file "{}"'.format(addr, xbrmm_key_file))
|
killbill/killbill-client-python | killbill/models/tenant_key_value.py | Python | apache-2.0 | 4,248 | 0.001177 | # coding: utf-8
#
# Copyright 2010-2014 Ning, Inc.
# Copyright 2014-2020 Groupon, Inc
# Copyright 2020-2021 Equinix, Inc
# Copyright 2014-2021 The Billing Project, LLC
#
# The Billing Project, LLC licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Kill Bill
Kill Bill is an open-source billing and payments platform # noqa: E501
OpenAPI spec version: 0.22.22-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TenantKeyValue(object):
    """Swagger model holding a tenant configuration key and its values.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Maps python attribute name -> declared swagger type.
    swagger_types = {
        'key': 'Str',
        'values': 'List[Str]'
    }

    # Maps python attribute name -> JSON field name.
    attribute_map = {
        'key': 'key',
        'values': 'values'
    }

    def __init__(self, key=None, values=None):  # noqa: E501
        """TenantKeyValue - a model defined in Swagger"""  # noqa: E501
        self._key = None
        self._values = None
        self.discriminator = None
        if key is not None:
            self.key = key
        if values is not None:
            self.values = values

    @property
    def key(self):
        """The configuration key of this TenantKeyValue (Str)."""
        return self._key

    @key.setter
    def key(self, key):
        """Set the configuration key of this TenantKeyValue."""
        self._key = key

    @property
    def values(self):
        """The configuration values of this TenantKeyValue (List[Str])."""
        return self._values

    @values.setter
    def values(self, values):
        """Set the configuration values of this TenantKeyValue."""
        self._values = values

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Return True when both objects are TenantKeyValue with equal state."""
        return isinstance(other, TenantKeyValue) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True when the two objects are not equal."""
        return not self == other
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.