| repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, lengths 0-8.16k) | middle (string, lengths 3-512) | suffix (string, lengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
pseudomuto/kazurator
|
kazurator/read_write_lock.py
|
Python
|
mit
| 3,177
| 0
|
from kazoo.exceptions import NoNodeError
from sys import maxsize
from .mutex import Mutex
from .internals import LockDriver
from .utils import lazyproperty
READ_LOCK_NAME = "__READ__"
WRITE_LOCK_NAME = "__WRIT__"
class _LockDriver(LockDriver):
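# Editorial note: applying the base driver's sort key for both lock names strips
# whichever marker (__READ__ or __WRIT__) is present, so read and write lock
# nodes sort together purely by their ZooKeeper sequence suffixes.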
def sort_key(self, string, _lock_name):
string = super(_LockDriver, self).sort_key(string, READ_LOCK_NAME)
string = super(_LockDriver, self).sort_key(string, WRITE_LOCK_NAME)
return string
class _ReadLockDriver(_LockDriver):
def __init__(self, predicate):
super(_ReadLockDriver, self).__init__()
self._predicate = predicate
def is_acquirable(self, children, sequence_node_name, max_leases):
return self._predicate(children, sequence_node_name)
class _Mutex(Mutex):
def __init__(self, client, path, name, max_leases, driver, timeout):
super(_Mutex, self).__init__(
client,
path,
max_leases,
name=name,
driver=driver,
timeout=timeout
)
def get_participant_nodes(self):
nodes = super(_Mutex, self).get_participant_nodes()
return list(filter(lambda node: self.name in node, nodes))
class ReadWriteLock(object):
def __init__(self, client, path, timeout=None):
self._client = client
self._path = path
self._timeout = timeout
@property
def path(self):
return self._path
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
self.read_lock.timeout = value
self.write_lock.timeout = value
@lazyproperty
def read_lock(self):
def predicate(children, sequence_node_name):
return self._read_is_acquirable_predicate(
children,
sequence_node_name
)
return _Mutex(
self._client,
self.path,
READ_LOCK_NAME,
maxsize,
_ReadLockDriver(predicate),
self.timeout
)
@lazyproperty
def write_lock(self):
return _Mutex(
self._client,
self.path,
WRITE_LOCK_NAME,
1,
_LockDriver(),
self.timeout
)
def get_participant_nodes(self):
nodes = self.read_lock.get_participant_nodes()
nodes.extend(self.write_lock.get_participant_nodes())
return nodes
def _read_is_acquirable_predicate(self, children, sequence_node_name):
if self.write_lock.is_owned_by_current_thread:
return (None, True)
index = 0
write_index = maxsize
our_index = -1
for node in children:
if WRITE_LOCK_NAME in node:
write_index = min(index, write_index)
elif node.startswith(sequence_node_name):
our_index = index
break
index += 1
if our_index < 0:
raise NoNodeError
acquirable = our_index < write_index
path = None if acquirable else children[write_index]
return (path, acquirable)
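A minimal usage sketch for the ReadWriteLock above, assuming a reachable ZooKeeper ensemble and Curator-style acquire()/release() methods on the underlying mutexes; the import path, host string, and lock path are illustrative placeholders.
from kazoo.client import KazooClient
from kazurator import ReadWriteLock  # assumed import path for the class defined above

client = KazooClient(hosts="127.0.0.1:2181")  # placeholder ZooKeeper host
client.start()

lock = ReadWriteLock(client, "/locks/shared-resource", timeout=5.0)

# Shared (read) side: any number of readers may hold it while no writer does.
lock.read_lock.acquire()
try:
    pass  # read the protected resource
finally:
    lock.read_lock.release()

# Exclusive (write) side: max_leases is 1, so only one writer at a time.
lock.write_lock.acquire()
try:
    pass  # mutate the protected resource
finally:
    lock.write_lock.release()

client.stop()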
|
AlericInglewood/3p-google-breakpad
|
src/tools/gyp/pylib/gyp/generator/scons.py
|
Python
|
bsd-3-clause
| 34,839
| 0.00643
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gyp
import gyp.common
import gyp.SCons as SCons
import os.path
import pprint
import re
# TODO: remove when we delete the last WriteList() call in this module
WriteList = SCons.WriteList
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': '${LIBPREFIX}',
'SHARED_LIB_PREFIX': '${SHLIBPREFIX}',
'STATIC_LIB_SUFFIX': '${LIBSUFFIX}',
'SHARED_LIB_SUFFIX': '${SHLIBSUFFIX}',
'INTERMEDIATE_DIR': '${INTERMEDIATE_DIR}',
'SHARED_INTERMEDIATE_DIR': '${SHARED_INTERMEDIATE_DIR}',
'OS': 'linux',
'PRODUCT_DIR': '$TOP_BUILDDIR',
'SHARED_LIB_DIR': '$LIB_DIR',
'LIB_DIR': '$LIB_DIR',
'RULE_INPUT_ROOT': '${SOURCE.filebase}',
'RULE_INPUT_DIRNAME': '${SOURCE.dir}',
'RULE_INPUT_EXT': '${SOURCE.suffix}',
'RULE_INPUT_NAME': '${SOURCE.file}',
'RULE_INPUT_PATH': '${SOURCE.abspath}',
'CONFIGURATION_NAME': '${CONFIG_NAME}',
}
# Tell GYP how to process the input for us.
generator_handles_variants = True
generator_wants_absolute_build_file_paths = True
def FixPath(path, prefix):
if not os.path.isabs(path) and not path[0] == '$':
path = prefix + path
return path
header = """\
# This file is generated; do not edit.
"""
_alias_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
_outputs = env.Alias(
['_%(target_name)s_action'],
%(inputs)s,
_action
)
env.AlwaysBuild(_outputs)
"""
_run_as_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
"""
_run_as_template_suffix = """
_run_as_target = env.Alias('run_%(target_name)s', target_files, _action)
env.Requires(_run_as_target, [
Alias('%(target_name)s'),
])
env.AlwaysBuild(_run_as_target)
"""
_command_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
_outputs = env.Command(
%(outputs)s,
%(inputs)s,
_action
)
"""
# This is copied from the default SCons action, updated to handle symlinks.
_copy_action_template = """
import shutil
import SCons.Action
def _copy_files_or_dirs_or_symlinks(dest, src):
SCons.Node.FS.invalidate_node_memos(dest)
if SCons.Util.is_List(src) and os.path.isdir(dest):
for file in src:
shutil.copy2(file, dest)
return 0
elif os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, dest)
return 0
elif os.path.isfile(src):
return shutil.copy2(src, dest)
else:
return shutil.copytree(src, dest, 1)
def _copy_files_or_dirs_or_symlinks_str(dest, src):
return 'Copying %s to %s ...' % (src, dest)
GYPCopy = SCons.Action.ActionFactory(_copy_files_or_dirs_or_symlinks,
_copy_files_or_dirs_or_symlinks_str,
convert=str)
"""
_rule_template = """
%(name)s_additional_inputs = %(inputs)s
%(name)s_outputs = %(outputs)s
def %(name)s_emitter(target, source, env):
return (%(name)s_outputs, source + %(name)s_additional_inputs)
if GetOption('verbose'):
%(name)s_action = Action([%(action)s])
else:
%(name)s_action = Action([%(action)s], %(message)s)
env['BUILDERS']['%(name)s'] = Builder(action=%(name)s_action,
emitter=%(name)s_emitter)
_outputs = []
_processed_input_files = []
for infile in input_files:
if (type(infile) == type('')
and not os.path.isabs(infile)
and not infile[0] == '$'):
infile = %(src_dir)r + infile
if str(infile).endswith('.%(extension)s'):
_generated = env.%(name)s(infile)
env.Precious(_generated)
_outputs.append(_generated)
%(process_outputs_as_sources_line)s
else:
_processed_input_files.append(infile)
prerequisites.extend(_outputs)
input_files = _processed_input_files
"""
_spawn_hack = """
import re
import SCons.Platform.posix
needs_shell = re.compile('["\\'><!^&]')
def gyp_spawn(sh, escape, cmd, args, env):
def strip_scons_quotes(arg):
if arg[0] == '"' and arg[-1] == '"':
return arg[1:-1]
return arg
stripped_args = [strip_scons_quotes(a) for a in args]
if needs_shell.search(' '.join(stripped_args)):
return SCons.Platform.posix.exec_spawnvpe([sh, '-c', ' '.join(args)], env)
else:
return SCons.Platform.posix.exec_spawnvpe(stripped_args, env)
"""
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def InvertNaiveSConsQuoting(s):
"""SCons tries to "help" with quoting by naively putting double-quotes around
command-line arguments containing space or tab, which is broken for all
but trivial cases, so we undo it. (See quote_spaces() in Subst.py)"""
if ' ' in s or '\t' in s:
# Then SCons will put double-quotes around this, so add our own quotes
# to close its quotes at the beginning and end.
s = '"' + s + '"'
return s
def EscapeSConsVariableExpansion(s):
"""SCons has its own variable expansion syntax using $. We must escape it for
strings to be interpreted literally. For some reason this requires four
dollar signs, not two, even without the shell involved."""
return s.replace('$', '$$$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = InvertNaiveSConsQuoting(s)
s = EscapeSConsVariableExpansion(s)
return s
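# Worked example (illustrative, added for clarity): for a define containing a
# space, the three escaping layers above compose as follows:
#   EscapeShellArgument('VERSION="1 2"')      -> 'VERSION="1 2"'    (single-quoted for the shell)
#   InvertNaiveSConsQuoting(...)              -> "'VERSION="1 2"'"  (pre-closes SCons' naive double-quotes)
#   EscapeSConsVariableExpansion(...)         -> unchanged, since no '$' is present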
def GenerateConfig(fp, config, indent='', src_dir=''):
"""
Generates SCons dictionary items for a gyp configuration.
This provides the main translation between the (lower-case) gyp settings
keywords and the (upper-case) SCons construction variables.
"""
var_mapping = {
'ASFLAGS' : 'asflags',
'CCFLAGS' : 'cflags',
'CFLAGS' : 'cflags_c',
'CXXFLAGS' : 'cflags_cc',
'CPPDEFINES' : 'defines',
'CPPPATH' : 'include_dirs',
# Add the ldflags value to $LINKFLAGS, but not $SHLINKFLAGS.
# SCons defines $SHLINKFLAGS to incorporate $LINKFLAGS, so
# listing both here would cause 'ldflags' to get appended to
# both, and then have it show up twice on the command line.
'LINKFLAGS' : 'ldflags',
}
postamble='\n%s],\n' % indent
for scons_var in sorted(var_mapping.keys()):
gyp_var = var_mapping[scons_var]
value = config.get(gyp_var)
if value:
if gyp_var in ('defines',):
value = [EscapeCppDefine(v) for v in value]
if gyp_var in ('include_dirs',):
if src_dir and not src_dir.endswith('/'):
src_dir += '/'
result = []
for v in value:
v = FixPath(v, src_dir)
# Force SCons to evaluate the CPPPATH directories at
# SConscript-read time, so delayed evaluation of $SRC_DIR
# doesn't point it to the --generator-output= directory.
result.append('env.Dir(%r)' % v)
value = result
else:
value = map(repr, value)
WriteList(fp,
value,
prefix=indent,
preamble='%s%s = [\n ' % (indent, scons_var),
postamble=postamble)
def GenerateSConscript(output_filename, spec, build_file, build_file_data):
"""
Generates a SConscript file for a specific target.
This generates a SConscript file suitable for building any or all of
the target's configurations.
A SConscript file may be called multiple times to generate targets for
multiple configurations. Consequently, it needs to be ready to build
the target for any requested configuration, and therefore contains
information about the settings for all configurations (generated into
the SConscript file at gyp configuration time) as well as logic for
selecting (at SCons build time) the specific configuration being built.
|
aurule/Sociogram
|
src/sociogram.py
|
Python
|
apache-2.0
| 60,889
| 0.008836
|
#!/usr/bin/env python2
'''
Copyright (c) 2012 Peter Andrews
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
#import system libraries
from __future__ import division
from gi.repository import Gtk, GooCanvas, Gdk
import networkx as nx
import xml.etree.ElementTree as et
from time import time
from os.path import basename, dirname
#import local libraries
import Errors
import Graph
import Drawing
from ETree import sub_element as sub
from undobuffer import UndoableBuffer as UBuff
class Sociogram(object):
def __init__(self):
'''Set up internals and instantiate/fix up GUI using Gtk.Builder.'''
self.version = "0.1"
self.G = Graph.Sociograph() # instantiate the graph for storage and positioning
#placeholders for selecting objects
self.selection = None
self.seltype = None
self.seldata = None
self.highlight_dist = 1
self.highlight = False
self.savepath = None
self.lastsave = time()
self.dirty = False
self.title = "Untitled Diagram - Sociogram"
self.doc_title = ""
self.doc_desc = ""
self.builder = Gtk.Builder()
self.builder.add_from_file("ui/sociogram.ui")
#set default type for new objects
self.builder.get_object("newtypesel").set_active(0)
self.node_lbl_store = Gtk.ListStore(str)
completions = []
for t in range(5):
x = Gtk.EntryCompletion()
x.set_model(self.node_lbl_store)
x.set_text_column(0)
x.set_minimum_key_length(1)
x.set_inline_completion(True)
x.set_popup_single_match(False)
completions.append(x)
#populate from_combo, to_combo, attr_edit_name with liststore and renderer
from_combo = self.builder.get_object("from_combo")
from_combo.set_model(self.node_lbl_store)
self.from_main = self.builder.get_object("from_combo_entry")
self.from_main.set_completion(completions[0])
to_combo = self.builder.get_object("to_combo")
to_combo.set_model(self.node_lbl_store)
self.to_main = self.builder.get_object("to_combo_entry")
self.to_main.set_completion(completions[1])
#populate from_combo_dlg and to_combo_dlg from the same model as above
from_combo_dlg = self.builder.get_object("from_combo_dlg")
from_combo_dlg.set_model(self.node_lbl_store)
self.from_dlg = self.builder.get_object("from_combo_dlg_entry")
self.from_dlg.set_completion(completions[2])
to_combo_dlg = self.builder.get_object("to_combo_dlg")
to_combo_dlg.set_model(self.node_lbl_store)
self.to_dlg = self.builder.get_object("to_combo_dlg_entry")
self.to_dlg.set_completion(completions[3])
#add completion to toolbar node search field
searchbar = self.builder.get_object("search_entry")
searchbar.set_completion(completions[4])
#connect attribute view with attribute list, create columns, and make it all sortable
editname = Gtk.CellRendererText()
editname.set_property("editable", True)
editname.connect("edited", self.update_attrs, 0)
editval = Gtk.CellRendererText()
editval.set_property("editable", True)
editval.connect("edited", self.update_attrs, 1)
self.attr_store = Gtk.ListStore(str, str, bool, str)
self.attr_store.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.attr_disp = self.builder.get_object("attrstree")
self.attr_disp.set_model(self.attr_store)
self.namecol = Gtk.TreeViewColumn("Name", editname, text=0)
self.namecol.set_sort_column_id(0)
self.namecol.set_expand(True)
col2 = Gtk.TreeViewColumn("Value", editval, text=1)
col2.set_sort_column_id(1)
col2.set_expand(True)
togglecell = Gtk.CellRendererToggle()
togglecell.connect("toggled", self.update_attrs, None, 2)
col3 = Gtk.TreeViewColumn("Visible", togglecell, active=2)
col3.set_sort_column_id(2)
self.attr_disp.append_column(self.namecol)
self.attr_disp.append_column(col2)
self.attr_disp.append_column(col3)
self.rel_store = Gtk.ListStore(str, str)
rel_combo = self.builder.get_object("rel_combo")
rel_combo.set_model(self.rel_store)
cell = Gtk.CellRendererText()
rel_combo.pack_start(cell, True)
rel_combo.add_attribute(cell, 'text', 0)
#create canvas object and add to the scroll window
#VERY IMPORTANT. using the normal window.add() call fails, but setting the parent like this makes everything fine
self.canvas = Drawing.Canvas(parent=self.builder.get_object("canvas_scroll"), has_tooltip=True, background_color="white")
#attach callbacks
self.canvas.node_callback = self.node_clicked
self.canvas.line_callback = self.line_clicked
self.canvas.key_handler = self.canvas_key_handler
self.canvas.connect("button-press-event", self.canvas_clicked)
self.canvas.connect("scroll-event", self.scroll_handler)
self.canvas.mouseover_callback = self.update_pointer
#TODO once the prefs dialog is implemented, this should be moved to a separate default style update function
#populate our default styling
sheet = self.canvas.edge_default_stylesheet
sheet.stroke_color = 0x000000ff
sheet.set_fontdesc('sans normal 11')
sheet.sel_color = 0xff0000ff
sheet.sel_width = 1
sheet.text_color = 0x000000ff
sheet.set_fontdesc('sans normal 11')
sheet = self.canvas.vertex_default_stylesheet
sheet.fill_color = 0xffff00ff
sheet.stroke_color = 0x000000ff
sheet.sel_color = 0x000000ff
sheet.text_color = 0x000000ff
sheet.set_fontdesc('sans normal 11')
#create file type filters
supported_extensions = {"XML Files":"*.xml", "Sociogram Files":"*.soc", "All Files":"*.*"}
self.save_dlg = self.builder.get_object("save_dlg")
self.open_dlg = self.builder.get_object("open_dlg")
for name, ext in supported_extensions.iteritems():
fil = Gtk.FileFilter()
fil.set_name(name)
fil.add_pattern(ext)
self.save_dlg.add_filter(fil)
self.open_dlg.add_filter(fil)
#create undoable text buffers
self.notes_buff = UBuff()
self.notes_buff.connect('insert-text', self.notes_check_undo)
self.notes_buff.connect('delete-range', self.notes_check_undo)
self.desc_buff = UBuff()
self.desc_buff.connect('insert-text', self.desc_check_undo)
self.desc_buff.connect('delete-range', self.desc_check_undo)
self.builder.get_object("notes_view").set_buffer(self.notes_buff)
self.builder.get_object("docdesc_view").set_buffer(self.desc_buff)
# Declare references for all the dialogs and popups we need. We do keep
# the builder around, so this is mostly for code readability.
# TODO instantiate reference to everything we care about, so that the
# expensive builder can be nixed.
self.not_implemented_box = self.builder.get_object("not_implemented_err")
self.about_dlg = self.builder.get_object("about_dlg")
self.add_item_dlg = self.builder.get_object("add_item_dlg")
self.dup_err_dlg = self.builder.get_object("dup_err_dlg")
self.export_dlg = self.buil
|
verdyanna/new_troika
|
fixture/platform_helper.py
|
Python
|
apache-2.0
| 2,490
| 0.002477
|
class PlatformHelper:
def __init__(self, app):
self.app = app
def fill_contact_form(self, contact_model):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_link_text("add new").click()
self.fillin_contact_form(contact_model)
def fillin_contact_form(self, contact_model):  # contact_model is a constructor; it lists every field needed to fill in the form
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").send_keys(contact_model.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").send_keys(contact_model.lastname)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").send_keys(contact_model.home)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").send_keys(contact_model.mobile)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").send_keys(contact_model.work)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(contact_model.phone2)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact_model.address)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
def return_to_home_page(self):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
def fill_platform_form(self, platform_model):
wd = self.app.wd
wd.find_element_by_link_text("Начать работу").click()
wd.find_element_by_id("add-object").click()
wd.find_element_by_id("id_address").click()
wd.find_element_by_id("id_address").clear()
wd.find_element_by_id("id_address").send_keys(platform_model.id_address)
wd.find_element_by_css_selector("span.suggestions-value").click()
wd.find_element_by_id("id_mkd").click()
wd.find_element_by_id("ui-id-8").click()
wd.find_element_by_id("id_qty_container").click()
wd.find_element_by_id("id_qty_container").clear()
wd.find_element_by_id("id_qty_container").send_keys(platform_model.qty_container)
|
scienceopen/pyrinex
|
src/georinex/nav2.py
|
Python
|
mit
| 8,213
| 0.001461
|
#!/usr/bin/env python
from pathlib import Path
from datetime import datetime
from typing import Dict, Union, Any, Sequence
from typing.io import TextIO
import xarray
import numpy as np
import logging
from .rio import opener, rinexinfo
from .common import rinex_string_to_float
#
STARTCOL2 = 3 # column where numerical data starts for RINEX 2
Nl = {"G": 7, "R": 3, "E": 7} # number of additional SV lines
def rinexnav2(fn: Union[TextIO, str, Path], tlim: Sequence[datetime] = None) -> xarray.Dataset:
"""
Reads RINEX 2.x NAV files
Michael Hirsch, Ph.D.
SciVision, Inc.
http://gage14.upc.es/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
ftp://igs.org/pub/data/format/rinex211.txt
"""
if isinstance(fn, (str, Path)):
fn = Path(fn).expanduser()
Lf = 19 # string length per field
svs = []
times = []
raws = []
with opener(fn) as f:
header = navheader2(f)
if header["filetype"] == "N":
svtype = "G"
fields = [
"SVclockBias",
"SVclockDrift",
"SVclockDriftRate",
"IODE",
"Crs",
"DeltaN",
"M0",
"Cuc",
"Eccentricity",
"Cus",
"sqrtA",
"Toe",
"Cic",
"Omega0",
"Cis",
"Io",
"Crc",
"omega",
"OmegaDot",
"IDOT",
"CodesL2",
"GPSWeek",
"L2Pflag",
"SVacc",
"health",
"TGD",
"IODC",
"TransTime",
"FitIntvl",
]
elif header["filetype"] == "G":
svtype = "R" # GLONASS
fields = [
"SVclockBias",
"SVrelFreqBias",
"MessageFrameTime",
"X",
"dX",
"dX2",
"health",
"Y",
"dY",
"dY2",
"FreqNum",
"Z",
"dZ",
"dZ2",
"AgeOpInfo",
]
elif header["filetype"] == "E":
svtype = "E" # Galileo
fields = [
"SVclockBias",
"SVclockDrift",
"SVclockDriftRate",
"IODnav",
"Crs",
"DeltaN",
"M0",
"Cuc",
"Eccentricity",
"Cus",
"sqrtA",
"Toe",
"Cic",
"Omega0",
"Cis",
"Io",
"Crc",
"omega",
"OmegaDot",
"IDOT",
"DataSrc",
"GALWeek",
"SISA",
"health",
"BGDe5a",
"BGDe5b",
"TransTime",
]
else:
raise NotImplementedError(f'I do not yet handle Rinex 2 NAV {header["sys"]} {fn}')
# %% read data
for ln in f:
try:
time = _timenav(ln)
except ValueError:
continue
if tlim is not None:
if time < tlim[0]:
_skip(f, Nl[header["systems"]])
continue
elif time > tlim[1]:
break
# %% format I2 http://gage.upc.edu/sites/default/files/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
svs.append(f"{svtype}{ln[:2]}")
times.append(time)
"""
now get the data as one big long string per SV
"""
raw = ln[
22:79
] # NOTE: MUST be 79, not 80 due to some files that put \n a character early!
for _ in range(Nl[header["systems"]]):
raw += f.readline()[STARTCOL2:79]
# one line per SV
# NOTE: Sebastijan added .replace(' ', ' ').replace(' -', '-')
# here, I would like to see a file that needs this first, to be sure
# I'm not needlessly slowing down reading or creating new problems.
raws.append(raw.replace("D", "E").replace("\n", ""))
# %% parse
svs = [s.replace(" ", "0") for s in svs]
svu = sorted(set(svs))
atimes = np.asarray(times)
timesu = np.unique(atimes)
data = np.empty((len(fields), timesu.size, len(svu)))
data.fill(np.nan)
for j, sv in enumerate(svu): # for each SV, across all values and times...
svi = [i for i, s in enumerate(svs) if s == sv] # these rows are for this SV
tu = np.unique(atimes[svi]) # this SV was seen at these times
if tu.size != atimes[svi].size:
logging.warning(f"duplicate times detected, skipping SV {sv}")
continue
for i in svi:
it = np.nonzero(timesu == times[i])[0][0] # int by defn
"""
some files sometimes drop the last measurement, this fixes that.
It assumes the blank is always in the last measurement for now.
"""
dvec = [
float(raws[i][k * Lf : (k + 1) * Lf])
for k in range(min(len(fields), len(raws[i]) // Lf))
]
data[: len(dvec), it, j] = dvec
# %% assemble output
# NOTE: time must be datetime64[ns] or .to_netcdf will fail
nav = xarray.Dataset(coords={"time": timesu.astype("datetime64[ns]"), "sv": svu})
for i, k in enumerate(fields):
if k is None:
continue
nav[k] = (("time", "sv"), data[i, :, :])
# GLONASS uses kilometers to report its ephemeris.
# Convert to meters here to be consistent with NAV3 implementation.
if svtype == "R":
for name in ["X", "Y", "Z", "dX", "dY", "dZ", "dX2", "dY2", "dZ2"]:
nav[name] *= 1e3
# %% other attributes
nav.attrs["version"] = header["version"]
nav.attrs["svtype"] = [svtype] # Use list for consistency with NAV3.
nav.attrs["rinextype"] = "nav"
if isinstance(fn, Path):
nav.attrs["filename"] = fn.name
if "ION ALPHA" in header and "ION BETA" in header:
alpha = header["ION ALPHA"]
alpha = [rinex_string_to_float(alpha[2 + i * 12 : 2 + (i + 1) * 12]) for i in range(4)]
beta = header["ION BETA"]
beta = [rinex_string_to_float(beta[2 + i * 12 : 2 + (i + 1) * 12]) for i in range(4)]
nav.attrs["ionospheric_corr_GPS"] = np.hstack((alpha, beta))
return nav
def navheader2(f: TextIO) -> Dict[str, Any]:
"""
For RINEX NAV version 2 only. End users should use rinexheader()
"""
if isinstance(f, (str, Path)):
with opener(f, header=True) as h:
return navheader2(h)
hdr = rinexinfo(f)
for ln in f:
if "END OF HEADER" in ln:
break
kind, content = ln[60:].strip(), ln[:60]
hdr[kind] = content
return hdr
def _timenav(ln: str) -> datetime:
year = int(ln[3:5])
if 80 <= year <= 99:
year += 1900
elif year < 80: # because we might pass in four-digit year
year += 2000
else:
raise ValueError(f"unknown year format {year}")
return datetime(
year=year,
month=int(ln[6:8]),
day=int(ln[9:11]),
hour=int(ln[12:14]),
minute=int(ln[15:17]),
second=int(float(ln[17:20])),
microsecond=int(float(ln[17:22]) % 1 * 1000000),
)
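# Illustrative parse (editorial example): for a RINEX 2 GPS epoch line such as
#   " 6 99  9  2 17 51 44.0 ..."
# ln[:2] is the satellite number, ln[3:5] the two-digit year (here 1999), and
# the call above yields datetime(1999, 9, 2, 17, 51, 44).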
def _skip(f: TextIO, Nl: int):
for _, _ in zip(range(Nl), f):
pass
def navtime2(fn: Union[TextIO, Path]) -> np.ndarray:
"""
read all times in RINEX 2 NAV file
"""
times = []
with opener(fn) as f:
hdr = navheader2(f)
while True:
ln = f.readline()
if not ln:
break
try:
time = _timenav(ln)
except ValueError:
continue
times.append(time)
_skip(f, Nl[hdr["systems"
|
cajone/pychess
|
lib/pychess/System/cairoextras.py
|
Python
|
gpl-3.0
| 2,930
| 0.000341
|
# from: http://cairographics.org/freetypepython/
import ctypes
import cairo
class FreeTypeLibInitializationFailed(Exception):
pass
class PycairoContext(ctypes.Structure):
_fields_ = [("PyObject_HEAD", ctypes.c_byte * object.__basicsize__),
("ctx", ctypes.c_void_p), ("base", ctypes.c_void_p)]
_initialized = False
def create_cairo_font_face_for_file(filename, faceindex=0, loadoptions=0):
global _initialized
global _freetype_so
global _cairo_so
global _ft_lib
global _surface
CAIRO_STATUS_SUCCESS = 0
FT_Err_Ok = 0
if not _initialized:
# find shared objects
_freetype_so = ctypes.CDLL("libfreetype.so.6")
_cairo_so = ctypes.CDLL("libcairo.so.2")
_cairo_so.cairo_ft_font_face_create_for_ft_face.restype = ctypes.c_void_p
_cairo_so.cairo_ft_font_face_create_for_ft_face.argtypes = [
ctypes.c_void_p, ctypes.c_int
]
_cairo_so.cairo_set_font_face.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
_cairo_so.cairo_font_face_status.argtypes = [ctypes.c_void_p]
_cairo_so.cairo_status.argtypes = [ctypes.c_void_p]
# initialize freetype
_ft_lib = ctypes.c_void_p()
if FT_Err_Ok != _freetype_so.FT_Init_FreeType(ctypes.byref(_ft_lib)):
raise FreeTypeLibInitializationFailed
_surface = cairo.ImageSurface(cairo.FORMAT_A8, 0, 0)
_initialized = True
# create freetype face
ft_face = ctypes.c_void_p()
cairo_ctx = cairo.Context(_surface)
cairo_t = PycairoContext.from_address(id(cairo_ctx)).ctx
if FT_Err_Ok != _freetype_so.FT_New_Face(_ft_lib, filename, faceindex,
ctypes.byref(ft_face)):
raise Exception("Error creating FreeType font face for " + filename)
# create cairo font face for freetype face
cr_face = _cairo_so.cairo_ft_font_face_create_for_ft_face(ft_face,
loadoptions)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_font_face_status(cr_face):
raise Exception("Error creating cairo font face for " + filename)
_cairo_so.cairo_set_font_face(cairo_t, cr_face)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_status(cairo_t):
raise Exception("Error creating cairo font face for " + filename)
face = cairo_ctx.get_font_face()
return face
if __name__ == '__main__':
face = create_cairo_font_face_for_file("../../../pieces/ttf/harlequin.ttf",
0)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 128)
ctx = cairo.Context(surface)
ctx.set_font_face(face)
ctx.set_font_size(30)
ctx.move_to(0, 44)
ctx.show_text("pnbrqk")
ctx.move_to(0, 74)
ctx.show_text("omvtwl")
del ctx
surface.write_to_png("0pieces.png")
|
Mdlkxzmcp/various_python
|
Alpha & Beta/Django/WaifuTracker/main_app/apps.py
|
Python
|
mit
| 90
| 0
|
from django.apps import AppConfig
class MainAppConfig(AppConfig):
name = 'main_app'
|
ghorn/debian-casadi
|
docs/examples/python/simulation.py
|
Python
|
lgpl-3.0
| 4,875
| 0.01641
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from pylab import *
from scipy.linalg import sqrtm
from casadi import *
from casadi.tools import *
# System states
states = struct_symSX(["x","y","dx","dy"])
x,y,dx,dy = states[...]
# System controls
controls = struct_symSX(["u","v"])
u,v = controls[...]
# System parameters
parameters = struct_symSX(["k","c","beta"])
k,c,beta = parameters[...]
# Provide some numerical values
parameters_ = parameters()
parameters_["k"] = 10
parameters_["beta"] = 1
parameters_["c"] = 1
vel = vertcat([dx,dy])
p = vertcat([x,y])
q = vertcat([u,v])
# System dynamics
F = -k*(p-q) - beta*v*sqrt(sumAll(vel**2)+c**2)
# System right hand side
rhs = struct_SX(states)
rhs["x"] = dx
rhs["y"] = dy
rhs["dx"] = F[0]
rhs["dy"] = F[1]
f = SXFunction(controldaeIn(x=states,p=parameters,u=controls),daeOut(ode=rhs))
f.init()
# Simulation output grid
N = 100
tgrid = linspace(0,10.0,N)
# ControlSimulator will output on each node of the timegrid
csim = ControlSimulator(f,tgrid)
csim.setOption("integrator", "cvodes")
csim.setOption("integrator_options",{"abstol":1e-10,"reltol":1e-10})
csim.init()
x0 = states(0)
# Create input profile
controls_ = controls.repeated(csim.getInput("u"))
controls_[0,"u"] = 1 # Kick the system with u=1 at the start
controls_[N/2,"v"] = 2 # Kick the system with v=2 at half the simulation time
# Pure simulation
csim.setInput(x0,"x0")
csim.setInput(parameters_,"p")
csim.setInput(controls_,"u")
csim.evaluate()
output = states.repeated(csim.getOutput())
# Plot all states
for k in states.keys():
plot(tgrid,output[vertcat,:,k])
xlabel("t")
legend(tuple(states.keys()))
print "xf=", output[-1]
# The remainder of this file deals with methods to calculate the state covariance matrix as it propagates through the system dynamics
# === Method 1: integrator sensitivity ===
# PF = d(I)/d(x0) P0 [d(I)/d(x0)]^T
P0 = states.squared()
P0[:,:] = 0.01*DMatrix.eye(states.size)
P0["x","dy"] = P0["dy","x"] = 0.002
# Not supported in current revision, cf. #929
# J = csim.jacobian("x0","xf")
# J.init()
# J.setInput(x0,"x0")
# J.setInput(parameters_,"p")
# J.setInput(controls_,"u")
# J.evaluate()
# Jk = states.squared_repeated(J.getOutput())
# F = Jk[-1]
# PF_method1 = mul([F,P0,F.T])
# print "State cov (method 1) = ", PF_method1
# === Method 2: Lyapunov equations ===
# P' = A.P + P.A^T
states_aug = struct_symSX([
entry("orig",sym=states),
entry("P",shapestruct=(states,states))
])
A = jacobian(rhs,states)
rhs_aug = struct_SX(states_aug)
rhs_aug["orig"] = rhs
rhs_aug["P"] = mul(A,states_aug["P"]) + mul(states_aug["P"],A.T)
f_aug = SXFunction(controldaeIn(x=states_aug,p=parameters,u=controls),daeOut(ode=rhs_aug))
f_aug.init()
csim_aug = ControlSimulator(f_aug,tgrid)
csim_aug.setOption("integrator", "cvodes")
csim_aug.init()
states_aug(csim_aug.input("x0"))["orig"] = x0
states_aug(csim_aug.input("x0"))["P"] = P0
csim_aug.setInput(parameters_,"p")
csim_aug.setInput(controls_,"u")
csim_aug.evaluate()
output = states_aug.repeated(csim_aug.getOutput())
PF_method2 = output[-1,"P"]
print "State cov (method 2) = ", PF_method2
# === Method 3: Unscented propagation ===
# Sample and simulate 2n+1 initial points
n = states.size
W0 = 0
x0 = DMatrix(x0)
W = DMatrix([ W0 ] + [(1.0-W0)/2/n for j in range(2*n)])
sqm = sqrtm(n/(1.0-W0)*DMatrix(P0)).real
sample_x = [ x0 ] + [x0+sqm[:,i] for i in range(n)] + [x0-sqm[:,i] for i in range(n)]
csim.setInput(parameters_,"p")
csim.setInput(controls_,"u")
simulated_x = [] # This can be parallelised
for x0_ in sample_x:
csim.setInput(x0_,"x0")
csim.evaluate()
simulated_x.append(csim.getOutput()[-1,:])
simulated_x = vertcat(simulated_x).T
Xf_mean = mul(simulated_x,W)
x_dev = simulated_x-mul(Xf_mean,DMatrix.ones(1,2*n+1))
PF_method3 = mul([x_dev,diag(W),x_dev.T])
print "State cov (method 3) = ", PF_method3
show()
|
duke8253/trafficserver
|
tests/gold_tests/proxy_protocol/proxy_serve_stale_dns_fail.test.py
|
Python
|
apache-2.0
| 3,341
| 0.002095
|
'''
Test proxy serving stale content when DNS lookup fails
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.ContinueOnFail = True
# Set up hierarchical caching processes
ts_child = Test.MakeATSProcess("ts_child")
ts_parent = Test.MakeATSProcess("ts_parent")
server_name = "http://unknown.domain.com/"
Test.testName = "STALE"
# Config child proxy to route to parent proxy
ts_child.Disk.records_config.update({
'proxy.config.url_remap.pristine_host_hdr': 1,
'proxy.config.http.cache.max_stale_age': 10,
'proxy.config.http.parent_proxy.self_detect': 0,
})
ts_child.Disk.parent_config.AddLine(
f'dest_domain=. parent=localhost:{ts_parent.Variables.port} round_robin=consistent_hash go_direct=false'
)
ts_child.Disk.remap_config.AddLine(
f'map http://localhost:{ts_child.Variables.port} {server_name}'
)
# Configure parent proxy
ts_parent.Disk.records_config.update({
'proxy.config.url_remap.pristine_host_hdr': 1,
'proxy.config.http.cache.max_stale_age': 10,
})
ts_parent.Disk.remap_config.AddLine(
f'map http://localhost:{ts_parent.Variables.port} {server_name}'
)
ts_parent.Disk.remap_config.AddLine(
f'map {server_name} {server_name}'
)
# Object to push to proxies
stale_5 = "HTTP/1.1 200 OK\nServer: ATS/10.0.0\nAccept-Ranges: bytes\nContent-Length: 6\nCache-Control: public, max-age=5\n\nCACHED"
stale_10 = "HTTP/1.1 200 OK\nServer: ATS/10.0.0\nAccept-Ranges: bytes\nContent-Length: 6\nCache-Control: public, max-age=10\n\nCACHED"
# Testing scenarios
child_curl_request = (
# Test child serving stale with failed DNS OS lookup
f'curl -X PUSH -d "{stale_5}" "http://localhost:{ts_child.Variables.port}";'
f'curl -X PUSH -d "{stale_10}" "http://localhost:{ts_parent.Variables.port}";'
f'sleep 7; curl -s -v http://localhost:{ts_child.Variables.port};'
f'sleep 15; curl -s -v http://localhost:{ts_child.Variables.port};'
# Test parent serving stale with failed DNS OS lookup
f'curl -X PUSH -d "{stale_5}" "http://localhost:{ts_parent.Variables.port}";'
f'sleep 7; curl -s -v http://localhost:{ts_parent.Variables.port};'
f'sleep 15; curl -s -v http://localhost:{ts_parent.Variables.port};'
)
# Test case for when parent server is down but child proxy can serve cache object
tr = Test.AddTestRun()
tr.Processes.Default.Command = child_curl_request
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts_child)
tr.Processes.Default.StartBefore(ts_parent)
tr.Processes.Default.Streams.stderr = "gold/serve_stale_dns_fail.gold"
tr.StillRunningAfter = ts_child
tr.StillRunningAfter = ts_parent
|
mhrivnak/pulp
|
server/pulp/server/webservices/controllers/repositories.py
|
Python
|
gpl-2.0
| 7,350
| 0.000952
|
"""
This module contains the web controllers for Repositories.
"""
import logging
import sys
import web
from pulp.common import dateutils
from pulp.server.auth.authorization import READ
from pulp.server.db.model.criteria import UnitAssociationCriteria
from pulp.server.webservices import serialization
from pulp.server.webservices.controllers.base import JSONController
from pulp.server.webservices.controllers.decorators import auth_required
from pulp.server.webservices.controllers.search import SearchController
import pulp.server.exceptions as exceptions
import pulp.server.managers.factory as manager_factory
_logger = logging.getLogger(__name__)
def _merge_related_objects(name, manager, repos):
"""
Takes a list of Repo objects and adds their corresponding related objects
in a list under the attribute given in 'name'. Uses the given manager to
access the related objects by passing the list of IDs for the given repos.
This is most commonly used for RepoImporter or RepoDistributor objects in
lists under the 'importers' and 'distributors' attributes.
@param name: name of the field, such as 'importers' or 'distributors'.
@type name: str
@param manager: manager class for the object type. must implement a method
'find_by_repo_list' that takes a list of repo ids.
@param repos: list of Repo instances that should have importers and
distributors added.
@type repos list of Repo instances
@return the same list that was passed in, just for convenience. The list
itself is not modified- only its members are modified in-place.
@rtype list of Repo instances
"""
repo_ids = tuple(repo['id'] for repo in repos)
# make it cheap to access each repo by id
repo_dict = dict((repo['id'], repo) for repo in repos)
# guarantee that at least an empty list will be present
for repo in repos:
repo[name] = []
for item in manager.find_by_repo_list(repo_ids):
repo_dict[item['repo_id']][name].append(item)
return repos
def _convert_repo_dates_to_strings(repo):
"""
Convert the last_unit_added & last_unit_removed fields of a repository
This modifies the repository in place
:param repo: database representation of a repo
:type repo: dict
"""
# convert the native datetime object to a string with timezone specified
last_unit_added = repo.get('last_unit_added')
if last_unit_added:
new_date = dateutils.to_utc_datetime(last_unit_added,
no_tz_equals_local_tz=False)
repo['last_unit_added'] = dateutils.format_iso8601_datetime(new_date)
last_unit_removed = repo.get('last_unit_removed')
if last_unit_removed:
new_date = dateutils.to_utc_datetime(last_unit_removed,
no_tz_equals_local_tz=False)
repo['last_unit_removed'] = dateutils.format_iso8601_datetime(new_date)
class RepoCollection(JSONController):
# Scope: Collection
# GET: Retrieve all repositories in the system
# POST: Repository Create
@staticmethod
def _process_repos(repos, importers=False, distributors=False):
"""
Apply standard processing to a collection of repositories being returned
to a client. Adds the object link and optionally adds related importers
and distributors.
@param repos: collection of repositories
@type repos: list, tuple
@param importers: iff True, adds related importers under the
attribute "importers".
@type importers: bool
@param distributors: iff True, adds related distributors under the
attribute "distributors".
@type distributors: bool
@return the same list that was passed in, just for convenience. The list
itself is not modified- only its members are modified in-place.
@rtype list of Repo instances
"""
if importers:
_merge_related_objects(
'importers', manager_factory.repo_importer_manager(), repos)
if distributors:
_merge_related_objects(
'distributors', manager_factory.repo_distributor_manager(), repos)
for repo in repos:
repo.update(serialization.link.search_safe_link_obj(repo['id']))
_convert_repo_dates_to_strings(repo)
# Remove internally used scratchpad from repo details
if 'scratchpad' in repo:
del repo['scratchpad']
return repos
class RepoSearch(SearchController):
def __init__(self):
super(RepoSearch, self).__init__(
manager_factory.repo_query_manager().find_by_criteria)
@auth_required(READ)
def GET(self):
query_params = web.input()
if query_params.pop('details', False):
query_params['importers'] = True
query_params['distributors'] = True
items = self._get_query_results_from_get(
('details', 'importers', 'distributors'))
RepoCollection._process_repos(
items,
query_params.pop('importers', False),
query_params.pop('distributors', False)
)
return self.ok(items)
@auth_required(READ)
def POST(self):
"""
Searches based on a Criteria object. Requires a posted parameter
'criteria' which has a data structure that can be turned into a
Criteria instance.
"""
items = self._get_query_results_from_post()
RepoCollection._process_repos(
items,
self.params().get('importers', False),
self.params().get('distributors', False)
)
return self.ok(items)
class RepoUnitAdvancedSearch(JSONController):
# Scope: Search
# POST: Advanced search for repo unit associations
@auth_required(READ)
def POST(self, repo_id):
# Params
params = self.params()
query = params.get('criteria', None)
repo_query_manager = manager_factory.repo_query_manager()
repo = repo_query_manager.find_by_id(repo_id)
if repo is None:
raise exceptions.MissingResource(repo_id=repo_id)
if query is None:
raise exceptions.MissingValue(['criteria'])
try:
criteria = UnitAssociationCriteria.from_client_input(query)
except:
_logger.error('Error parsing association criteria [%s]' % query)
raise exceptions.PulpDataException(), None, sys.exc_info()[2]
# Data lookup
manager = manager_factory.repo_unit_association_query_manager()
if criteria.type_ids is not None and len(criteria.type_ids) == 1:
type_id = criteria.type_ids[0]
units = manager.get_units_by_type(repo_id, type_id, criteria=criteria)
else:
units = manager.get_units_across_types(repo_id, criteria=criteria)
return self.ok(units)
# These are defined under /v2/repositories/ (see application.py to double-check)
urls = (
'/search/$', 'RepoSearch', # resource search
'/([^/]+)/search/units/$', 'RepoUnitAdvancedSearch', # resource search
)
application = web.application(urls, globals())
|
mushtaqak/edx-platform
|
common/djangoapps/enrollment/views.py
|
Python
|
agpl-3.0
| 21,774
| 0.004225
|
"""
The Enrollment API Views should be simple, lean HTTP endpoints for API access. This should
consist primarily of authentication, request validation, and serialization.
"""
import logging
from ipware.ip import get_ip
from django.core.exceptions import ObjectDoesNotExist
from django.utils.decorators import method_decorator
from opaque_keys import InvalidKeyError
from course_modes.models import CourseMode
from openedx.core.djangoapps.user_api.preferences.api import update_email_opt_in
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission, ApiKeyHeaderPermissionIsAuthenticated
from rest_framework import status
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle
from rest_framework.views import APIView
from opaque_keys.edx.keys import CourseKey
from embargo import api as embargo_api
from cors_csrf.authentication import SessionAuthenticationCrossDomainCsrf
from cors_csrf.decorators import ensure_csrf_cookie_cross_domain
from openedx.core.lib.api.authentication import (
SessionAuthenticationAllowInactiveUser,
OAuth2AuthenticationAllowInactiveUser,
)
from util.disable_rate_limit import can_disable_rate_limit
from enrollment import api
from enrollment.errors import (
CourseNotFoundError, CourseEnrollmentError,
CourseModeNotFoundError, CourseEnrollmentExistsError
)
from student.models import User
log = logging.getLogger(__name__)
class EnrollmentCrossDomainSessionAuth(SessionAuthenticationAllowInactiveUser, SessionAuthenticationCrossDomainCsrf):
"""Session authentication that allows inactive users and cross-domain requests. """
pass
class ApiKeyPermissionMixIn(object):
"""
This mixin is used to provide a convenience function for doing individual permission checks
for the presence of API keys.
"""
def has_api_key_permissions(self, request):
"""
Checks to see if the request was made by a server with an API key.
Args:
request (Request): the request being made into the view
Return:
True if the request has been made with a valid API key
False otherwise
"""
return ApiKeyHeaderPermission().has_permission(request, self)
class EnrollmentUserThrottle(UserRateThrottle, ApiKeyPermissionMixIn):
"""Limit the number of requests users can make to the enrollment API."""
rate = '40/minute'
def allow_request(self, request, view):
return self.has_api_key_permissions(request) or super(EnrollmentUserThrottle, self).allow_request(request, view)
@can_disable_rate_limit
class EnrollmentView(APIView, ApiKeyPermissionMixIn):
"""
**Use Cases**
Get the user's enrollment status for a course.
**Example Requests**:
GET /api/enrollment/v1/enrollment/{username},{course_id}
**Response Values**
* created: The date the user account was created.
* mode: The enrollment mode of the user in this course.
* is_active: Whether the enrollment is currently active.
* course_details: A collection that includes:
* course_id: The unique identifier for the course.
* enrollment_start: The date and time that users can begin enrolling in the course.
If null, enrollment opens immediately when the course is created.
* enrollment_end: The date and time after which users cannot enroll for the course.
If null, the enrollment period never ends.
* course_start: The date and time at which the course opens.
If null, the course opens immediately when created.
* course_end: The date and time at which the course closes. If null, the course never ends.
* course_modes: An array of data about the enrollment modes supported for the course.
Each enrollment mode collection includes:
* slug: The short name for the enrollment mode.
* name: The full name of the enrollment mode.
* min_price: The minimum price for which a user can enroll in this mode.
* suggested_prices: A list of suggested prices for this enrollment mode.
* currency: The currency of the listed prices.
* expiration_datetime: The date and time after which users cannot enroll in the course in this mode.
* description: A description of this mode.
* invite_only: Whether students must be invited to enroll in the course; true or false.
* user: The ID of the user.
"""
authentication_classes = OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser
permission_classes = ApiKeyHeaderPermissionIsAuthenticated,
throttle_classes = EnrollmentUserThrottle,
# Since the course about page on the marketing site uses this API to auto-enroll users,
# we need to support cross-domain CSRF.
@method_decorator(ensure_csrf_cookie_cross_domain)
def get(self, request, course_id=None, username=None):
"""Create, read, or update enrollment information for a user.
HTTP Endpoint for all CRUD operations for a user course enrollment. Allows creation, reading, and
updates of the current enrollment for a particular course.
Args:
request (Request): To get current course enrollment information, a GET request will return
information for the current user and the specified course.
course_id (str): URI element specifying the course location. Enrollment information will be
returned, created, or updated for this particular course.
username (str): The username associated with this enrollment request.
Return:
A JSON serialized representation of the course enrollment.
"""
username = username or request.user.username
# TODO Implement proper permissions
if request.user.username != username and not self.has_api_key_permissions(request) \
and not request.user.is_superuser:
# Return a 404 instead of a 403 (Unauthorized). If one user is looking up
# other users, do not let them deduce the existence of an enrollment.
return Response(status=status.HTTP_404_NOT_FOUND)
try:
return Response(api.get_enrollment(username, course_id))
except CourseEnrollmentError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"An error occurred while retrieving enrollments for user "
u"'{username}' in course '{course_id}'"
).format(username=username, course_id=course_id)
}
)
@can_disable_rate_limit
class EnrollmentCourseDetailView(APIView):
"""
**Use Cases**
Get enrollment details for a course.
Response values include the course schedule and enrollment modes supported by the course.
Use the parameter include_expired=1 to include expired enrollment modes in the response.
**Note:** Getting enrollment details for a course does not require authentication.
**Example Requests**:
GET /api/enrollment/v1/course/{course_id}
GET /api/v1/enrollment/course/{course_id}?include_expired=1
**Response Values**
A collection of course enrollments for the user, or for the newly created enrollment.
Each course enrollment contains:
* course_id: The unique identifier of the course.
* enrollment_start: The date and time that users can begin enrolling in the course.
If null, enrollment opens immediately when the course is created.
* enrollment_end: The date and time after which users cannot enroll for the course.
If null, the enrollment period never ends.
* course_start: Th
|
rh-marketingops/dwm
|
dwm/test/test_normIncludes.py
|
Python
|
gpl-3.0
| 407
| 0.007371
|
normIncludes = [
{"fieldName": "field1", "includes": "GOOD,VALUE", "excludes": "BAD,STUFF", "begins": "", "ends": "", "replace": "goodvalue"},
{"fieldName": "field1", "includes": "", "excludes": "", "begins": "ABC", "ends": "", "replace": "goodvalue"},
{"fieldName": "field1", "includes": "", "excludes": "", "begins": "", "ends": "XYZ", "replace": "goodvalue"},
{"fieldName": "field100"}
]
|
martynovp/edx-platform
|
openedx/core/djangoapps/util/testing.py
|
Python
|
agpl-3.0
| 8,831
| 0.002038
|
""" Mixins for setting up particular course structures (such as split tests or cohorted content) """
from datetime import datetime
from pytz import UTC
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.partitions.partitions import UserPartition, Group
from student.tests.factories import CourseEnrollmentFactory, UserFactory
class ContentGroupTestCase(ModuleStoreTestCase):
"""
Sets up discussion modules visible to content groups 'Alpha' and
'Beta', as well as a module visible to all students. Creates a
staff user, users with access to Alpha/Beta (by way of cohorts),
and a non-cohorted user with no special access.
"""
def setUp(self):
super(ContentGroupTestCase, self).setUp()
self.course = CourseFactory.create(
org='org', number='number', run='run',
# This test needs to use a course that has already started --
# discussion topics only show up if the course has already started,
# and the default start date for courses is Jan 1, 2030.
start=datetime(2012, 2, 3, tzinfo=UTC),
user_partitions=[
UserPartition(
0,
'Content Group Configuration',
'',
[Group(1, 'Alpha'), Group(2, 'Beta')],
scheme_id='cohort'
)
],
grading_policy={
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"passing_grade": 0,
"weight": 1.0
}]
},
cohort_config={'cohorted': True},
discussion_topics={}
)
self.staff_user = UserFactory.create(is_staff=True)
self.alpha_user = UserFactory.create()
self.beta_user = UserFactory.create()
self.non_cohorted_user = UserFactory.create()
for user in [self.staff_user, self.alpha_user, self.beta_user, self.non_cohorted_user]:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
alpha_cohort = CohortFactory(
course_id=self.course.id,
name='Cohort Alpha',
users=[self.alpha_user]
)
beta_cohort = CohortFactory(
course_id=self.course.id,
name='Cohort Beta',
users=[self.beta_user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=alpha_cohort,
partition_id=self.course.user_partitions[0].id,
group_id=self.course.user_partitions[0].groups[0].id
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=beta_cohort,
partition_id=self.course.user_partitions[0].id,
group_id=self.course.user_partitions[0].groups[1].id
)
self.alpha_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='alpha_group_discussion',
discussion_target='Visible to Alpha',
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]}
)
self.beta_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='beta_group_discussion',
discussion_target='Visible to Beta',
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]}
)
self.global_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='global_group_discussion',
discussion_target='Visible to Everyone'
)
self.course = self.store.get_item(self.course.location)
class TestConditionalContent(ModuleStoreTestCase):
"""
Construct a course with graded problems that exist within a split test.
"""
TEST_SECTION_NAME = 'Problem'
def setUp(self):
"""
Set up a course with graded problems within a split test.
Course hierarchy is as follows (modeled after how split tests
are created in studio):
-> course
-> chapter
-> sequential (graded)
-> vertical
-> split_test
-> vertical (Group A)
-> problem
-> vertical (Group B)
-> problem
"""
super(TestConditionalContent, self).setUp()
# Create user partitions
self.user_partition_group_a = 0
self.user_partition_group_b = 1
self.partition = UserPartition(
0,
'first_partition',
'First Partition',
[
Group(self.user_partition_group_a, 'Group A'),
Group(self.user_partition_group_b, 'Group B')
]
)
# Create course with group configurations and grading policy
self.course = CourseFactory.create(
user_partitions=[self.partition],
grading_policy={
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"passing_grade": 0,
"weight": 1.0
}]
}
)
chapter = ItemFactory.create(parent_location=self.course.location,
display_name='Chapter')
# add a sequence to the course to which the problems can be added
self.problem_section = ItemFactory.create(parent_location=chapter.location,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name=self.TEST_SECTION_NAME)
# Create users and partition them
self.student_a = UserFactory.create(username='student_a', email='student_a@example.com')
CourseEnrollmentFactory.create(user=self.student_a, course_id=self.course.id)
self.student_b = UserFactory.create(username='student_b', email='student_b@example.com')
CourseEnrollmentFactory.create(user=self.student_b, course_id=self.course.id)
UserCourseTagFactory(
user=self.student_a,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member
value=str(self.user_partition_group_a)
)
UserCourseTagFactory(
user=self.student_b,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member
value=str(self.user_partition_group_b)
)
# Create a vertical to contain our split test
problem_vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
display_name='Problem Unit'
)
# Create the split test and child vertical containers
vertical_a_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_a')
vertical_b_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_b')
self.split_test = ItemFactory.create(
parent_location=problem_vertical.location,
category='split_test',
display_name='Split Test',
u
|
peace098beat/pyside_cookbook
|
10_MatplotlibVSPygraph/mplcanvas/__init__.py
|
Python
|
gpl-3.0
| 104
| 0
|
from .SignalDataCanvas import SignalDataCanvas
from .SignalDataCanvas import SignalDataCanvasFast
|
|
RedHatInsights/insights-core
|
insights/combiners/ceph_version.py
|
Python
|
apache-2.0
| 2,860
| 0.001399
|
"""
Ceph Version
============
Combiner for Ceph Version information. It uses the results of
the ``CephVersion``, ``CephInsights`` and ``CephReport`` parsers.
The order from most preferred to least preferred is ``CephVersion``, ``CephInsights``, ``CephReport``.
"""
from insights import combiner
from insights.parsers.ceph_version import CephVersion as CephV
from insights.parsers.ceph_insights import CephInsights
from insights.parsers.ceph_cmd_json_parsing import CephReport
from insights.core.context import Context
@combiner([CephV, CephInsights, CephReport])
class CephVersion(object):
"""
Combiner for Ceph Version information. It uses the results of
the ``CephVersion``, ``CephInsights`` and ``CephReport`` parsers.
    The preferred parsing order is ``CephVersion``, ``CephInsights``, ``CephReport``.
Attributes:
version (str): The Red Hat release version
major (str): The major version of Red Hat release version
minor (str): The minor version of Red Hat release version
        is_els (boolean): If the version is in 'Extended life cycle support (ELS) add-on' phase
downstream_release (str): The downstream release info
upstream_version (dict): The detailed upstream version info with the
following keys `release (int)`, `major (int)` and `minor (int)`.
Examples:
>>> type(cv)
<class 'insights.combiners.ceph_version.CephVersion'>
>>> cv.version
'3.2'
>>> cv.major
'3'
>>> cv.minor
'2'
>>> cv.is_els
False
>>> cv.downstream_release
'0'
>>> cv.upstream_version["release"]
12
>>> cv.upstream_version["major"]
2
>>> cv.upstream_version["minor"]
8
"""
def __init__(self, cv, ci, cr):
if cv:
self.version = cv.version
self.major = cv.major
self.minor = cv.minor
self.is_els = cv.is_els
self.downstream_release = cv.downstream_release
self.upstream_version = cv.upstream_version
elif ci:
context = Context(content=ci.data["version"]["full"].strip().splitlines())
cv = CephV(context)
self.version = cv.version
            self.major = cv.major
self.minor = cv.minor
self.is_els = cv.is_els
self.downstream_release = cv.downstream_release
self.upstream_version = cv.upstream_version
else:
            context = Context(content=cr["version"].strip().splitlines())
cv = CephV(context)
self.version = cv.version
self.major = cv.major
self.minor = cv.minor
self.is_els = cv.is_els
self.downstream_release = cv.downstream_release
self.upstream_version = cv.upstream_version
|
derekjchow/models
|
research/feelvos/utils/video_input_generator.py
|
Python
|
apache-2.0
| 24,003
| 0.006208
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for providing semantic segmentation video data."""
import tensorflow as tf
from feelvos import input_preprocess
from feelvos import model
from feelvos.utils import mask_damaging
from feelvos.utils import train_utils
slim = tf.contrib.slim
dataset_data_provider = slim.dataset_data_provider
MIN_LABEL_COUNT = 10
def decode_image_sequence(tensor, image_format='jpeg', shape=None,
channels=3, raw_dtype=tf.uint8):
"""Decodes a sequence of images.
Args:
tensor: the tensor of strings to decode, shape: [num_images]
image_format: a string (possibly tensor) with the format of the image.
Options include 'jpeg', 'png', and 'raw'.
shape: a list or tensor of the decoded image shape for a single image.
channels: if 'shape' is None, the third dimension of the image is set to
this value.
raw_dtype: if the image is encoded as raw bytes, this is the method of
decoding the bytes into values.
Returns:
The decoded images with shape [time, height, width, channels].
"""
handler = slim.tfexample_decoder.Image(
shape=shape, channels=channels, dtype=raw_dtype, repeated=True)
return handler.tensors_to_item({'image/encoded': tensor,
'image/format': image_format})
def _get_data(data_provider, dataset_split, video_frames_are_decoded):
"""Gets data from data provider.
Args:
data_provider: An object of slim.data_provider.
dataset_split: Dataset split.
video_frames_are_decoded: Boolean, whether the video frames are already
decoded
Returns:
image: Image Tensor.
label: Label Tensor storing segmentation annotations.
object_label: An integer refers to object_label according to labelmap. If
the example has more than one object_label, take the first one.
image_name: Image name.
height: Image height.
width: Image width.
video_id: String tensor representing the name of the video.
Raises:
ValueError: Failed to find label.
"""
if video_frames_are_decoded:
image, = data_provider.get(['image'])
else:
image, = data_provider.get(['image/encoded'])
# Some datasets do not contain image_name.
if 'image_name' in data_provider.list_items():
image_name, = data_provider.get(['image_name'])
else:
image_name = tf.constant('')
height, width = data_provider.get(['height', 'width'])
label = None
if dataset_split != 'test':
if video_frames_are_decoded:
if 'labels_class' not in data_provider.list_items():
raise ValueError('Failed to find labels.')
label, = data_provider.get(['labels_class'])
else:
key = 'segmentation/object/encoded'
if key not in data_provider.list_items():
raise ValueError('Failed to find labels.')
      label, = data_provider.get([key])
object_label = None
video_id, = data_provider.get(['video_id'])
return image, label, object_label, image_name, height, width, video_id
def _has_foreground_and_background_in_first_frame(label, subsampling_factor):
"""Checks if the labels have foreground and background in the first frame.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
subsampling_factor: Integer, the subsampling factor.
Returns:
Boolean, whether the labels have foreground and background in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis],
[h // subsampling_factor,
w // subsampling_factor],
align_corners=True),
axis=0)
is_bg = tf.equal(label_downscaled, 0)
is_fg = tf.logical_not(is_bg)
# Just using reduce_any was not robust enough, so lets make sure the count
# is above MIN_LABEL_COUNT.
fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
has_bg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
has_fg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
return tf.logical_and(has_bg, has_fg)
def _has_foreground_and_background_in_first_frame_2(label,
decoder_output_stride):
"""Checks if the labels have foreground and background in the first frame.
Second attempt, this time we use the actual output dimension for resizing.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
decoder_output_stride: Integer, the stride of the decoder output.
Returns:
Boolean, whether the labels have foreground and background in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
align_corners=True), axis=0)
is_bg = tf.equal(label_downscaled, 0)
is_fg = tf.logical_not(is_bg)
# Just using reduce_any was not robust enough, so lets make sure the count
# is above MIN_LABEL_COUNT.
fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
has_bg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
has_fg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
return tf.logical_and(has_bg, has_fg)
def _has_enough_pixels_of_each_object_in_first_frame(
label, decoder_output_stride):
"""Checks if for each object (incl. background) enough pixels are visible.
During test time, we will usually not see a reference frame in which only
very few pixels of one object are visible. These cases can be problematic
during training, especially if more than the 1-nearest neighbor is used.
That's why this function can be used to detect and filter these cases.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
decoder_output_stride: Integer, the stride of the decoder output.
Returns:
Boolean, whether the labels have enough pixels of each object in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
align_corners=True), axis=0)
_, _, counts = tf.unique_with_counts(
tf.reshape(label_downscaled, [-1]))
has_enough_pixels_per_object = tf.reduce_all(
tf.greater_equal(counts, MIN_LABEL_COUNT))
return has_enough_pixels_per_object
def get(dataset,
num_frames_per_video,
crop_size,
batch_size,
min_resize_value=None,
max_resize_value=None,
resize_factor=None,
min_scale_factor=1.,
max_scale_factor=1.,
scale_factor_step_size=0,
preprocess_image_and_label=True,
num_readers=1,
num_threads=1,
dataset_split=None,
is_training=True,
model_variant=None,
batch_capacity_factor=32,
video_frames_are_decoded=False,
decoder_output_stride=None,
first_frame_finetuning=False,
sample_only_first_frame_for_finetuning=False,
sample_adjacent_and_consistent_query_frames=False,
remap_l
|
wcooley/python-gryaml
|
tests/test_gryaml.py
|
Python
|
mit
| 18,899
| 0.000106
|
"""Tests for `gryaml` module."""
from __future__ import print_function
from textwrap import dedent
from typing import Callable, List, Union
import pytest
import yaml
from boltons.iterutils import first
import gryaml
import py2neo_compat
# noinspection PyProtectedMember
from gryaml.pyyaml import _unregister as gryaml_unregister
from py2neo_compat import (
foremost,
Graph,
Node,
node,
rel,
Relationship,
) # noqa: F401
py2neo_compat.monkey_patch_py2neo()
@pytest.fixture(autouse=True)
def unregister_gryaml():
"""Ensure every test explicitly registers."""
gryaml_unregister()
@pytest.mark.usefixtures('graphdb_offline')
@pytest.mark.unit
def test_node_parameter_permutation_offline(sample_yaml):
# type: (Callable[[str], str]) -> None
"""Test nodes offline."""
gryaml.register()
result = yaml.load(sample_yaml('node-parameter-permutations'))
# All nodes
assert 3 == len(result)
# No relationships
assert {type(Node())} == {type(n) for n in result}
# 2 nodes with 'person' label
assert 2 == len([n for n in result if n.labels])
assert {'person'} == set(first(n.labels) for n in result if n.labels)
# 2 nodes with `occupation` property
occupations = [n['occupation'] for n in result if n['occupation']]
assert 2 == len(occupations)
assert {'Comedian', 'Game Show Host'} == set(occupations)
@pytest.mark.integration
def test_node_parameter_permutations(graphdb, sample_yaml):
# type: (Graph, Callable[[str], str]) -> None
"""Test node representation."""
gryaml.register()
result = yaml.load(sample_yaml('node-parameter-permutations'))
assert 3 == len(result)
result = match_all_nodes(graphdb)
assert 3 == len(result) # All nodes
result = match_all_nodes_and_rels(graphdb)
assert 0 == len(result) # No relationships
result = graphdb.cypher.execute('MATCH (n:person) RETURN n')
assert 2 == len(result) # 2 nodes with `person` label
result = graphdb.cypher.execute('MATCH (n) WHERE exists(n.occupation)'
' RETURN n')
assert 2 == len(result) # 2 nodes with `occupation` property
@pytest.mark.usefixtures('graphdb_offline')
@pytest.mark.unit
def test_relationship_structures_offline(sample_yaml):
# type: (Callable[[str], str]) -> None
"""Test relationship representations offline."""
gryaml.register()
result = yaml.load(sample_yaml('relationships'))
assert 5 == len(result)
nodes = [n for n in result if isinstance(n, Node)]
assert 3 == len(nodes) # 3 nodes
rels = [r for r in result if isinstance(r, Relationship)]
assert 2 == len(rels) # 2 relationships
directed_rel = [(r.start_node, r, r.end_node)
for r in result
if isinstance(r, Relationship) and r.type == 'DIRECTED']
assert_lana_directed_matrix(directed_rel)
@pytest.mark.integration
def test_relationship_structures(graphdb, sample_yaml):
# type: (Graph, Callable[[str], str]) -> None
"""Test relationship representation."""
gryaml.register()
result = yaml.load(sample_yaml('relationships'))
assert 5 == len(result)
result = match_all_nodes(graphdb)
assert 3 == len(result) # 3 nodes
result = match_all_nodes_and_rels(graphdb)
assert 2 == len(result) # 2 relationships
result = graphdb.cypher.execute('MATCH (p)-[r:DIRECTED]->(m)'
' RETURN p,r,m')
assert_lana_directed_matrix(result)
@pytest.mark.usefixtures('graphdb_offline')
@pytest.mark.unit
def test_complex_related_graph_offline(sample_yaml):
# type: (Callable[[str], str]) -> None
"""Test graph with multiples nodes & relationships offline."""
gryaml.register()
result = yaml.load(sample_yaml('nodes-and-relationships'))
assert 21 == len(result)
directed_rel = [(r.start_node, r, r.end_node)
for r in result
if isinstance(r, Relationship) and
r.type == 'DIRECTED' and
r.end_node['title'] == 'The Matrix']
assert_lana_directed_matrix(directed_rel)
@pytest.mark.integration
def test_complex_related_graph(graphdb, sample_yaml):
# type: (Graph, Callable[[str], str]) -> None
"""Test loading a graph with multiple nodes & relationships."""
gryaml.register()
result = yaml.load(sample_yaml('nodes-and-relationships'))
assert 21 == len(result)
result = graphdb.cypher.execute("""
MATCH (p)-[r:DIRECTED]->(m{title:"The Matrix"})
RETURN p,r,m
""")
assert_lana_directed_matrix(result)
@pytest.fixture
def sample_simple_rel():
# type: () -> Relationship
"""Produce a sample relationship."""
# Awkward underscore avoids even more awkward quoting.
return rel(node({'name': 'Babs_Jensen'}),
'CHARACTER_IN',
node({'name': 'Animal_House'}))
@pytest.mark.integration
def test_node_can_be_loaded_and_created(graphdb):
# type: (Relationship) -> None
"""Test loading a single node, creating in the DB and returning it."""
gryaml.register()
sample_yaml = """
!gryaml.node
- properties:
name: Babs_Jensen
- labels:
- person
"""
node_loaded = yaml.load(sample_yaml)
    node_found = foremost(match_all_nodes(graphdb))
assert node_loaded == node_found
node_data = yaml.load(sample_yaml.replace('!gryaml.node', ''))
assert node_data[0]['properties'] == py2neo_compat.to_dict(node_loaded)
assert node_data[1]['labels'] == list(node_loaded.labels)
@pytest.mark.unit
def test_node_can_be_loaded_simple():
# type: () -> None
"""Test loading a single node
|
with "simple" representation.
The "simple" representation should return the same structure that would
be created if the '!gryaml.node' tag were absent or the implicit type.
"""
gryaml.register_simple()
sample_yaml = """
!gryaml.node
- properties:
name: Babs_Jensen
- labels:
- person
"""
node_loaded = yaml.safe_load(sample_yaml)
node_data = yaml.load(sample_yaml.replace('!gryaml.node', ''))
assert node_data == node_loaded
node_data = yaml.load(sample_yaml.replace('!gryaml.node', '!!seq'))
assert node_data == node_loaded
@pytest.mark.unit
def test_node_can_be_dumped(sample_simple_rel):
# type: (Relationship) -> None
"""Test dump/represent Node."""
gryaml.register()
sample_node = sample_simple_rel.start_node
node_yaml = yaml.dump(sample_node, canonical=True)
node_yaml = node_yaml.replace('!!python/unicode', '!!str')
assert dedent("""
---
!gryaml.node [
!!map {
? !!str "properties"
: !!map {
? !!str "name"
: !!str "Babs_Jensen",
},
},
] """).strip() == node_yaml.strip()
@pytest.mark.unit
def test_node_subclass_can_be_dumped():
# type: (Relationship) -> None
"""Test dump/represent Node."""
gryaml.register()
class MyNode(py2neo_compat.Node):
@classmethod
def new(cls, **kwargs):
"""Construct an abstract/unbound MyNode, properties only."""
if py2neo_compat.py2neo_ver == 1:
inst = cls(None)
inst.set_properties(kwargs)
return inst
else:
return cls(**kwargs)
sample_node = MyNode.new(name='Babs_Jensen')
node_yaml = yaml.dump(sample_node, canonical=True)
node_yaml = node_yaml.replace('!!python/unicode', '!!str')
assert dedent("""
---
!gryaml.node [
!!map {
? !!str "properties"
: !!map {
? !!str "name"
: !!str "Babs_Jensen",
},
},
] """).strip() == node_yaml.strip()
@pytest.mark.unit
def test_node_can_be_dumped_simple(sample_simple_rel):
# type: (Relationship) -> None
"""Test dump/represent Node."""
gryaml.register_simple()
# gryaml.register_simple(safe=False)
sample_node = sample_si
|
drphilmarshall/IntroBot
|
introbot.py
|
Python
|
gpl-2.0
| 594
| 0.005051
|
import sys
import introbot
# Connect to twitter, using your own account...
try: from introbot import connection
except:
print "IntroBot: unable t
|
o connect to to Twitte
|
r"
sys.exit()
print "IntroBot: connected to Twitter: ",connection.line
# Instantiate a host, who will do the introductions:
jeeves = introbot.Host()
# Pass it the data it needs to figure out what to say:
# database = 'example/SHD_users_bios.csv'
database = 'example/pie.csv'
jeeves.listen(database)
# Pass the twitter object to the IntroBot, and have it start sending statuses.
jeeves.introduce(connection.line)
# Done!
|
dansimau/pystringattr
|
stringattr.py
|
Python
|
bsd-3-clause
| 5,928
| 0.000506
|
import re
from collections import namedtuple, deque
from .regex import RE_SPLIT, RE_KEY, RE_INDEX
from .utils import Enum, _missing
StackItem = namedtuple('StackItem', ['name', 'access_method'])
# Accessor methods:
# INDEX means this accessor is index or key-based, eg. [1] or ['foo']
# DEFAULT means property
AccessorType = Enum(['INDEX', 'DEFAULT'])
def first(*vals):
"""Return the first value that's not _missing."""
for val in vals:
if val is not _missing:
return val
return _missing
def get_key(obj, index):
|
"""Retrieve index or key from the specified obj, or return
_missing if it does not exist.
"""
try:
return obj[index]
except (KeyError, IndexError, TypeError):
return _missing
def get_attribute(obj, attr):
"""Retrieve attribute from the specified obj, or return
_missing if it does not exist.
"""
try:
return getattr(obj, attr)
except AttributeError:
return _missing
class StringAttribute(object):
"""
Used to access a deeply nested attributes of a Python data structure
using a string representation of Python-like syntax.
Eg.:
# Data structure
my_dict = {
'foo': {
'bar': [
{'baz': 'wee'},
{'baz': 'woo'}
]
}
}
# Return 'woo'
StringAttribute('foo.bar[1].baz').get(my_dict)
"""
def __init__(self, string_attr_path=None, default=_missing):
self._default = default
if string_attr_path is not None:
self._stack = self._parse(string_attr_path)
self._string_attr_path = string_attr_path
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._string_attr_path)
def __str__(self):
return '%r' % self._string_attr_path
def _get(self, obj, stack, default=_missing):
"""Retrieve value from an object structure given a list of
attributes."""
pointer = obj
# Try all access methods
for accessor in stack:
# Key or index accessors
if accessor.access_method == AccessorType.INDEX:
pointer = get_key(pointer, accessor.name)
# Default accessor
elif accessor.access_method == AccessorType.DEFAULT:
# Attempt to get the object attribute first, or if that fails
# try to get a key with that name or list index
pointer = first(get_attribute(pointer, accessor.name),
get_key(pointer, accessor.name))
# If nothing could be accessed return None or raise an error
if pointer is _missing:
if default is not _missing:
return default
else:
self._raise_exception(obj, accessor.name)
return pointer
def _parse(self, string_attr_path):
"""Parse string_attr_path into a stack of accessors."""
stack = deque()
for node in self._split(string_attr_path):
# Node is a list index (eg. '[2]')
if re.match(RE_INDEX, node):
# Convert into integer
list_index = int(node.translate(None, '[]'))
stack.append(StackItem(list_index, AccessorType.INDEX))
# Node is a key (string-based index)
elif re.match(RE_KEY, node):
key = re.match(RE_KEY, node).groups()[0]
stack.append(StackItem(key, AccessorType.INDEX))
else:
# Default accessor method
stack.append(StackItem(node, AccessorType.DEFAULT))
return stack
def _raise_exception(self, obj, node):
"""Raise exception."""
raise Exception('%r object has no key or attribute at path %r' % (obj.__class__.__name__, node))
@classmethod
def _split(cls, string_attr_path):
"""Split string into list of accessor nodes."""
# Split string at '.' and '[0]'
nodes = re.split(RE_SPLIT, string_attr_path)
# Filter out empty position params from the split
nodes = filter(lambda x: x, nodes)
return nodes
def get(self, obj, string_attr_path=None, default=_missing):
"""Retrieve value from an object structure using string
representation of attributes path."""
# Get defaults
if default is _missing:
default = self._default
if string_attr_path is not None:
stack = self._parse(string_attr_path)
else:
string_attr_path = self._string_attr_path
stack = self._stack
return self._get(obj, stack, default)
def set(self, base_obj, value, string_attr_path=None):
"""Set value on an object structure using string representation
of attributes path."""
if string_attr_path is not None:
stack = self._parse(string_attr_path)
else:
string_attr_path = self._string_attr_path
stack = self._stack
# Get the name of the attribute we're setting (the last item in
# the stack)
attr = stack.pop()
# Get the actual object we're going to operate on
target_obj = self._get(base_obj, stack)
# Set the attribute or key value
if attr.access_method == AccessorType.INDEX:
target_obj[attr.name] = value
else:
setattr(target_obj, attr.name, value)
# Wrapper functions for a builtin-esque feel...
def getstrattr(obj, attr, default=_missing):
"""Retrieve value from an object structure using string
representation of attributes path."""
return StringAttribute().get(obj, attr, default)
def setstrattr(obj, attr, val):
"""Set value on an object structure using string representation
of attributes path."""
return StringAttribute().set(obj, val, attr)
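A minimal usage sketch (my addition, not part of the original module; it assumes the getstrattr/setstrattr helpers defined above are in scope), mirroring the structure from the StringAttribute docstring:
my_dict = {'foo': {'bar': [{'baz': 'wee'}, {'baz': 'woo'}]}}
assert getstrattr(my_dict, 'foo.bar[1].baz') == 'woo'
class Config(object):
    pass
cfg = Config()
cfg.db = Config()
setstrattr(cfg, 'db.host', 'localhost')  # same effect as cfg.db.host = 'localhost'
assert cfg.db.host == 'localhost'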
|
agrover/targetd
|
setup.py
|
Python
|
gpl-3.0
| 358
| 0
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='targetd',
version='0.8.8',
description='Linux remote storage API daemon',
|
license='GPLv3',
maintainer='Andy Grover',
maintainer_email='agrover@redhat.com',
url='http://github.com/open-iscsi/targetd',
packages=['targetd'],
scripts=['scripts/targetd']
)
|
antoinecarme/pyaf
|
tests/artificial/transf_Anscombe/trend_Lag1Trend/cycle_30/ar_/test_artificial_1024_Anscombe_Lag1Trend_30__0.py
|
Python
|
bsd-3-clause
| 264
| 0.087121
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 0);
|
MasterOdin/forseti
|
main.py
|
Python
|
mit
| 702
| 0.001425
|
"""
A brief demonstration of using the prover within Forseti
"""
from __future__ import print_function
from forseti.prover import Prover
# pylint: disable=duplicate-code
prover = Prover()
prover.add_formula("or(iff(G,H),iff(not(G),H))")
prover.add_goal("or(iff(not(G),not(H)),not(iff(G,H)))")
print(prover.run_prover())
print("\n".join(prover.get_proof()))
print("\n\n")
prover = Prover()
prover.add_formula("forall(x,if(S(x),exists(y,and(S(y)
|
,forall(z,iff(B(z,y),and(B(z,x),B(z,z))))))))")
prover.add_formula("forall(x,not(B(x,x)))")
prover.add_formula("exists(x,S(x))")
prover.add_goal("exists(x,and(S(x),forall(y,not(B(y,x)))))")
print(prover.run_prover())
print("\n".join(prover.get_proof()))
|
rootfs/ctdb
|
tests/takeover/simulation/node_group.py
|
Python
|
gpl-3.0
| 1,299
| 0.004619
|
#!/usr/bin/env python
# This demonstrates a node group configurations.
#
# Node groups can be defined with the syntax "-g N@IP0,IP1-IP2,IP3".
# This says to create a group of N nodes with IPs IP0, IP1, ..., IP2,
# IP3. Run it with deterministic IPs causes lots of gratuitous IP
# reassignments. Running with --nd fixes this.
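# For illustration (added, not in the original script): "-g 3@A,C-E" expands to
# three nodes, each configured with the IP list ['A', 'C', 'D', 'E'].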
import ctdb_takeover
import sys
from optparse import make_option
import string
ctdb_takeover.process_args([
make_option("-g", "--group",
action="append", type="string", dest="groups",
help="define a node group using N@IPs syntax"),
])
def expand_range(r):
sr = r.split("-", 1)
if len(sr) == 2:
all = string.ascii_uppercase + string.ascii_lowercase
sr = list(all[all.index(sr[0]):all.index(sr[1])+1])
return sr
def add_node_group(s):
(count, ips_str) = s.split("@", 1)
ips = [i for r in ips_str.split(",") \
for i in expand_range(r) if r != ""]
for i in range(int(count)):
c.add_node(ctdb_takeover.Node(ips))
c = ctdb_takeover.Cluster()
if ctdb_takeover.options.groups is None:
    print "Error: no node groups defined."
sys.exit(1)
for g in ctdb_takeover.options.groups:
add_node_group(g)
c.recover()
c.random_iterations()
|
FAForever/client
|
src/model/player.py
|
Python
|
gpl-3.0
| 4,165
| 0
|
from PyQt5.QtCore import pyqtSignal
from model.modelitem import ModelItem
from model.rating import RatingType
from model.transaction import transactional
class Player(ModelItem):
newCurrentGame = pyqtSignal(object, object, object)
"""
Represents a player the client knows about.
"""
def __init__(
self,
id_,
login,
ratings={},
avatar=None,
country=None,
clan=None,
league=None,
**kwargs
):
ModelItem.__init__(self)
"""
Initialize a Player
"""
# Required fields
# Login should be mutable, but we look up things by login right now
self.id = int(id_)
self.login = login
self.add_field("avatar", avatar)
self.add_field("country", country)
self.add_field("clan", clan)
self.add_field("league", league)
self.add_field("ratings", ratings)
# The game the player is currently playing
self._currentGame = None
@property
def id_key(self):
return self.id
def copy(self):
p = Player(self.id, self.login, **self.field_dict)
p.currentGame = self.currentGame
return p
@transactional
def update(self, **kwargs):
_transaction = kwargs.pop("_transaction")
old_data = self.copy()
ModelItem.update(self, **kwargs)
self.emit_update(old_data, _transaction)
def __index__(self):
return self.id
@property
|
def global_estimate(self):
return self.rating_estimate()
@property
def ladder_estimate(self):
return self.rating_estimate(RatingType.LADDER.value)
@property
def global_rating_mean(self):
return self.rating_mean()
@property
def global_rating_deviation(self):
return self.rating_deviation()
@property
    def ladder_rating_mean(self):
return self.rating_mean(RatingType.LADDER.value)
@property
def ladder_rating_deviation(self):
return self.rating_deviation(RatingType.LADDER.value)
@property
def number_of_games(self):
count = 0
for rating_type in self.ratings:
count += self.ratings[rating_type].get("number_of_games", 0)
return count
def rating_estimate(self, rating_type=RatingType.GLOBAL.value):
"""
Get the conservative estimate of the player's trueskill rating
"""
try:
mean = self.ratings[rating_type]["rating"][0]
deviation = self.ratings[rating_type]["rating"][1]
return int(max(0, (mean - 3 * deviation)))
except (KeyError, IndexError):
return 0
def rating_mean(self, rating_type=RatingType.GLOBAL.value):
try:
return round(self.ratings[rating_type]["rating"][0])
except (KeyError, IndexError):
return 1500
def rating_deviation(self, rating_type=RatingType.GLOBAL.value):
try:
return round(self.ratings[rating_type]["rating"][1])
except (KeyError, IndexError):
return 500
def game_count(self, rating_type=RatingType.GLOBAL.value):
try:
return int(self.ratings[rating_type]["number_of_games"])
except KeyError:
return 0
def __repr__(self):
return self.__str__()
def __str__(self):
return (
"Player(id={}, login={}, global_rating={}, ladder_rating={})"
).format(
self.id,
self.login,
(self.global_rating_mean, self.global_rating_deviation),
(self.ladder_rating_mean, self.ladder_rating_deviation),
)
@property
def currentGame(self):
return self._currentGame
@transactional
def set_currentGame(self, game, _transaction=None):
if self.currentGame == game:
return
old = self._currentGame
self._currentGame = game
_transaction.emit(self.newCurrentGame, self, game, old)
@currentGame.setter
def currentGame(self, val):
# CAVEAT: this will emit signals immediately!
self.set_currentGame(val)
|
petermalcolm/osf.io
|
framework/tasks/__init__.py
|
Python
|
apache-2.0
| 1,133
| 0.004413
|
# -*- coding: utf-8 -*-
"""Asynchronous task queue module."""
from celery import Celery
from celery.utils.log import get_task_logger
from raven import Client
from raven.contrib.celery import register_signal
from website import settings
app = Celery()
# TODO: Hardcoded settings module. Should be set using framework's config handler
app.config_from_object('website.settings.celeryconfig')
if settings.SENTRY_DSN:
client = Client(settings.SENTRY_DSN)
register_signal(client)
@app.task
def error_handler(task_id, task_name):
"""logs detailed message about tasks that raise exceptions
:param task_id: TaskID of the failed task
:param task_name: name of task that failed
"""
# get the current logger
    logger = get_task_logger(__name__)
# query the broker for the AsyncResult
    result = app.AsyncResult(task_id)
excep = result.get(propagate=False)
    # log detailed error message in error log
    logger.error('#####FAILURE LOG BEGIN#####\n'
                 'Task {0} raised exception: {1}\n{2}\n'
'#####FAILURE LOG STOP#####'.format(task_name, excep, result.traceback))
|
buddyli/private2w
|
libs/pony/orm/__init__.py
|
Python
|
apache-2.0
| 28
| 0
|
from pony.orm.core import *
| |
ruchee/vimrc
|
vimfiles/bundle/vim-python/submodules/autopep8/test/suite/out/E21.py
|
Python
|
mit
| 222
| 0
|
#: E211
spam(1)
#: E211 E211
dict['key'] = list[index]
#: E211
dict['key']['subkey'] = list[index]
#: Okay
spam(1)
dict['key'] = list[index]
# This is not prohibited by PEP8, but avoid it.
class Foo (Bar, Baz):
pass
|
tualatrix/django-alipay
|
alipay/trade_create_by_buyer/ptn/signals.py
|
Python
|
gpl-3.0
| 149
| 0
|
from django.dispatch import Signal
# Sent when a payment is successfully processed.
alipay_ptn_successful = Signal()
alipay_ptn_flagged = Signal()
|
redmeros/Lean
|
Algorithm.Python/BasicTemplateAlgorithm.py
|
Python
|
apache-2.0
| 2,223
| 0.010806
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect
|
.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
import numpy as np
### <summary>
### Basic template algorithm simply initializes the date range and cash. This is a skeleton
### framework you can use for designing an algorithm.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class BasicTemplateAlgorithm(QCAlgorithm):
'''Basic template algorithm simply initializes the date range and cash'''
def Initialize(self):
'''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
self.SetStartDate(2013,10, 7) #Set Start Date
self.SetEndDate(2013,10,11) #Set End Date
self.SetCash(100000) #Set Strategy Cash
# Find more symbols here: http://quantconnect.com/data
self.AddEquity("SPY", Resolution.Second)
self.Debug("numpy test >>> print numpy.pi: " + str(np.pi))
def OnData(self, data):
'''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
Arguments:
data: Slice object keyed by symbol containing the stock data
'''
if not self.Portfolio.Invested:
self.SetHoldings("SPY", 1)
|
arxcruz/tempest-tool
|
tempestmail/utils.py
|
Python
|
gpl-3.0
| 2,611
| 0.001532
|
import datetime
import re
import requests
import tempestmail.constants as constants
from six.moves.urllib.parse import urljoin
def compare_tests(failures):
''' Detect fails covered by bugs and new'''
covered, new = [], []
for fail in failures:
for test in constants.TESTS:
if re.search(test, fail):
covered.append(fail)
new = [fail for fail in failures if fail not in covered]
return covered, new
def get_html(url):
try:
resp = requests.get(url)
print(resp)
if resp is None:
raise Exception("Get None as result")
except Exception as e:
print("Exception %s" % str(e))
return
return resp
def get_tests_results(console):
''' Get results of tests from console'''
failed = [constants.TESTRE.search(l).group(1)
for l in console.splitlines() if constants.FAILED in l]
ok = [constants.TESTRE.search(l).group(1)
for l in console.splitlines() if constants.OK in l]
    errors = [constants.TESTRE.search(l).group(1)
for l in console.splitlines() if constants.ERROR in l]
# all_skipped = [TESTRE.search(l).group(1)
# for l in console.splitlines() if SKIPPED in l]
return failed, ok, errors
def get_console(job_url):
''' Get console page of job'''
def _good_result(res):
if res is None or int(res.status_code) not in (200, 404):
return False
else:
return True
def _get_date(c):
        text = c.splitlines()
# find last line with timestamp
for l in text[::-1]:
if constants.TIMEST.match(l):
return datetime.datetime.strptime(
constants.TIMEST.search(l).group(1),
"%Y-%m-%d %H:%M")
return None
url = urljoin(job_url, "console.html.gz")
res = get_html(url)
if not _good_result(res):
print("Error getting console %s" % url)
# Try again
res = get_html(url)
if not _good_result(res):
return (None, None, None)
elif int(res.status_code) == 404:
url = urljoin(job_url, "console.html")
res = get_html(url)
if not _good_result(res):
# Try again
res = get_html(url)
if not _good_result(res):
print("Error getting console %s" % url)
return (None, None, None)
console = res.content.decode('utf-8')
# with open("/tmp/console", "wt") as f:
# f.write(console)
date = _get_date(console)
return console, date, url
|
JohnLZeller/dd-agent
|
checks.d/mysql.py
|
Python
|
bsd-3-clause
| 15,405
| 0.003505
|
# stdlib
import subprocess
import os
import sys
import re
import traceback
# project
from checks import AgentCheck
from util import Platform
# 3rd party
import pymysql
GAUGE = "gauge"
RATE = "rate"
STATUS_VARS = {
'Connections': ('mysql.net.connections', RATE),
'Max_used_connections': ('mysql.net.max_connections', GAUGE),
'Open_files': ('mysql.performance.open_files', GAUGE),
'Table_locks_waited': ('mysql.performance.table_locks_waited', GAUGE),
'Threads_connected': ('mysql.performance.threads_connected', GAUGE),
'Innodb_data_reads': ('mysql.innodb.data_reads', RATE),
'Innodb_data_writes': ('mysql.innodb.data_writes', RATE),
'Innodb_os_log_fsyncs': ('mysql.innodb.os_log_fsyncs', RATE),
'Innodb_buffer_pool_size': ('mysql.innodb.buffer_pool_size', RATE),
'Slow_queries': ('mysql.performance.slow_queries', RATE),
'Questions': ('mysql.performance.questions', RATE),
'Queries': ('mysql.performance.queries', RATE),
'Com_select': ('mysql.performance.com_select', RATE),
'Com_insert': ('mysql.performance.com_insert', RATE),
'Com_update': ('mysql.performance.com_update', RATE),
'Com_delete': ('mysql.performance.com_delete', RATE),
'Com_insert_select': ('mysql.performance.com_insert_select', RATE),
'Com_update_multi': ('mysql.performance.com_update_multi', RATE),
'Com_delete_multi': ('mysql.performance.com_delete_multi', RATE),
'Com_replace_select': ('mysql.performance.com_replace_select', RATE),
'Qcache_hits':('mysql.performance.qcache_hits', RATE),
'Innodb_mutex_spin_waits': ('mysql.innodb.mutex_spin_waits', RATE),
'Innodb_mutex_spin_rounds': ('mysql.innodb.mutex_spin_rounds', RATE),
'Innodb_mutex_os_waits': ('mysql.innodb.mutex_os_waits', RATE),
'Created_tmp_tables': ('mysql.performance.created_tmp_tables', RATE),
'Created_tmp_disk_tables': ('mysql.performance.created_tmp_disk_tables', RATE),
'Created_tmp_files': ('mysql.performance.created_tmp_files', RATE),
'Innodb_row_lock_waits': ('mysql.innodb.row_lock_waits', RATE),
'Innodb_row_lock_time': ('mysql.innodb.row_lock_time', RATE),
'Innodb_current_row_locks': ('mysql.innodb.current_row_locks', GAUGE),
'Open_tables': ('mysql.performance.open_tables', GAUGE),
}
class MySql(AgentCheck):
SERVICE_CHECK_NAME = 'mysql.can_connect'
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.mysql_version = {}
self.greater_502 = {}
def get_library_versions(self):
return {"pymysql": pymysql.__version__}
def check(self, instance):
host, port, user, password, mysql_sock, defaults_file, tags, options = self._get_config(instance)
if (not host or not user) and not defaults_file:
raise Exception("Mysql host and user are needed.")
db = self._connect(host, port, mysql_sock, user, password, defaults_file)
# Metric collection
self._collect_metrics(host, db, tags, options)
if Platform.is_linux():
self._collect_system_metrics(host, db, tags)
def _get_config(self, instance):
host = instance.get('server', '')
user = instance.get('user', '')
port = int(instance.get('port', 0))
password = instance.get('pass', '')
mysql_sock = instance.get('sock', '')
defaults_file = instance.get('defaults_file', '')
tags = instance.get('tags', None)
options = instance.get('options', {})
return host, port, user, password, mysql_sock, defaults_file, tags, options
def _connect(self, host, port, mysql_sock, user, password, defaults_file):
service_check_tags = [
'host:%s' % host,
'port:%s' % port
]
try:
if defaults_file != '':
db = pymysql.connect(read_default_file=defaults_file)
elif mysql_sock != '':
db = pymysql.connect(unix_socket=mysql_sock,
user=user,
passwd=password)
elif port:
db = pymysql.connect(host=host,
port=port,
user=user,
passwd=password)
else:
db = pymysql.connect(host=host,
user=user,
|
passwd=password)
self.log.debug("Connected to MySQL")
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
except Exception:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
raise
return db
def _collect_metrics(self, host, db, tags, options):
cursor = db.cursor()
cursor.execute("SHO
|
W /*!50002 GLOBAL */ STATUS;")
status_results = dict(cursor.fetchall())
self._rate_or_gauge_statuses(STATUS_VARS, status_results, tags)
cursor.execute("SHOW VARIABLES LIKE 'Key%';")
variables_results = dict(cursor.fetchall())
cursor.close()
del cursor
#Compute key cache utilization metric
key_blocks_unused = self._collect_scalar('Key_blocks_unused', status_results)
key_cache_block_size = self._collect_scalar('key_cache_block_size', variables_results)
key_buffer_size = self._collect_scalar('key_buffer_size', variables_results)
key_cache_utilization = 1 - ((key_blocks_unused * key_cache_block_size) / key_buffer_size)
self.gauge("mysql.performance.key_cache_utilization", key_cache_utilization, tags=tags)
# Compute InnoDB buffer metrics
# Be sure InnoDB is enabled
if 'Innodb_page_size' in status_results:
page_size = self._collect_scalar('Innodb_page_size', status_results)
innodb_buffer_pool_pages_total = self._collect_scalar('Innodb_buffer_pool_pages_total', status_results)
innodb_buffer_pool_pages_free = self._collect_scalar('Innodb_buffer_pool_pages_free', status_results)
innodb_buffer_pool_pages_total = innodb_buffer_pool_pages_total * page_size
innodb_buffer_pool_pages_free = innodb_buffer_pool_pages_free * page_size
innodb_buffer_pool_pages_used = innodb_buffer_pool_pages_total - innodb_buffer_pool_pages_free
innodb_buffer_pool_pages_utilization = innodb_buffer_pool_pages_used / innodb_buffer_pool_pages_total
self.gauge("mysql.innodb.buffer_pool_free", innodb_buffer_pool_pages_free, tags=tags)
self.gauge("mysql.innodb.buffer_pool_used", innodb_buffer_pool_pages_used, tags=tags)
self.gauge("mysql.innodb.buffer_pool_total", innodb_buffer_pool_pages_total, tags=tags)
self.gauge("mysql.innodb.buffer_pool_utilization", innodb_buffer_pool_pages_utilization, tags=tags)
if 'galera_cluster' in options and options['galera_cluster']:
value = self._collect_scalar('wsrep_cluster_size', status_results)
self.gauge('mysql.galera.wsrep_cluster_size', value, tags=tags)
if 'replication' in options and options['replication']:
# get slave running form global status page
slave_running = self._collect_string('Slave_running', status_results)
if slave_running is not None:
if slave_running.lower().strip() == 'on':
slave_running = 1
else:
slave_running = 0
self.gauge("mysql.replication.slave_running", slave_running, tags=tags)
self._collect_dict(GAUGE, {"Seconds_behind_master": "mysql.replication.seconds_behind_master"}, "SHOW SLAVE STATUS", db, tags=tags)
def _rate_or_gauge_statuses(self, statuses, dbResults, tags):
for status, metric in statuses.iteritems():
metric_name, metric_type = metric
value = self._collect_scalar(status, dbResults)
if value is not None:
if metric_type == RATE:
|
tecan/xchat-rt
|
plugins/scripts/encryption/supybot-code-6361b1e856ebbc8e14d399019e2c53a35f4e0063/plugins/MoobotFactoids/__init__.py
|
Python
|
gpl-2.0
| 2,507
| 0.000399
|
###
# Copyright (c) 2003-2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Moobot factoid compatibility module. Moobot's factoids were originally
designed to emulate Blootbot's factoids, so in either case, you should find
this plugin comfortable.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "0.1"
__author__ = supybot.authors.strike
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=8 expandtab textwidth=78:
|
lopesivan/blackjack
|
cartas/nipe.py
|
Python
|
gpl-2.0
| 169
| 0
|
class Nipe(object):
    def __init__(self, nome, simbolo):
        self.nome = nome
self.simbolo = simbolo
def __repr__(self):
return self.simbolo
|
gfgtdf/wesnoth-old
|
utils/dockerbuilds/mingw/get_dlls.py
|
Python
|
gpl-2.0
| 470
| 0.002128
|
#!/usr/bin/env python3
import pefile, pathlib, shutil
dlls = set()
dllpath = pathlib.Path('/windows/mingw64/bin')
pe_modules = set([pefile.PE('wesnoth.exe')])
while pe_modules:
pe = pe_modules.pop()
for entry in pe.DIRECTORY_ENTRY_IMPORT:
        path = dllpath / pathlib.Path(entry.dll.decode())
if path not in dlls and path.exists():
dlls.add(path)
pe_modules.add(pefile.PE(path))
for dll in dlls:
shutil.copy(dll, ".")
|
Ripsnorta/pyui2
|
docs/demos/widgetdemo/widgetdemo.py
|
Python
|
lgpl-2.1
| 7,972
| 0.005394
|
###################################################################################
# Copyright (c) 2005 John Judd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###################################################################################
from optparse import OptionParser
import pyui2
from pyui2.themes import Theme
from pyui2.themes import Win2kTheme
from pyui2.themes import WinXPTheme
from pyui2.themes import OSXTheme
from pyui2.themes import ComicTheme
from labelpanel import LabelPanel
from buttonpanel import ButtonPanel
from checkboxpanel import CheckboxPanel
from editpanel import EditPanel
from gridpanel import GridPanel
from sheetpanel import SheetPanel
from treepanel import TreePanel
from picturepanel import PicturePanel
from listboxpanel import ListboxPanel
from sliderbarpanel import SliderBarPanel
#from menupanel import MenuPanel
#from captionbarpanel import CaptionBarPanel
from scrollpanel import ScrollPanel
from dropdownpanel import DropdownPanel
from radiopanel import RadioPanel
from splitterpanel import SplitterPanel
#############################################################################################################
##
#############################################################################################################
class WidgetDemo:
#########################################################################################################
##
#########################################################################################################
def __init__(self):
parser = OptionParser()
parser.add_option("-D", action="store", type="string", dest="deviceName", default="2d")
parser.add_option("-F", action="store_true", dest="fullscreen", default=False)
(options, args) = parser.parse_args()
pyui2.init(800, 600, options.deviceName, options.fullscreen, "Widget Demo")
self.themes = { "Standard" : Theme,
"Windows 2000" : Win2kTheme,
"Windows XP" : WinXPTheme,
"Mac OSX" : OSXTheme,
"Comic" : ComicTheme,
}
#########################################################################################################
##
#########################################################################################################
def setupMainFrame(self):
flags = (pyui2.widgets.Frame.NO_CAPTION, pyui2.widgets.Frame.NO_RESIZE)
self.mainFrame = pyui2.widgets.Frame(0, self.mbar.height, 800, 400, "", flags)
self.tabPanel = pyui2.widgets.TabbedPanel()
self.tabPanel.addPanel("Label", LabelPanel(self))
self.tabPanel.addPanel("Button", ButtonPanel(self))
self.tabPanel.addPanel("Checkbox", CheckboxPanel(self))
self.tabPanel.addPanel("Edit", EditPanel(self))
self.tabPanel.addPanel("Grid", GridPanel(self))
self.tabPanel.addPanel("Sheet", SheetPanel(self))
self.tabPanel.addPanel("Tree", TreePanel(self))
self.tabPanel.addPanel("Picture", PicturePanel(self))
self.tabPanel.addPanel("Listbox", ListboxPanel(self))
self.tabPanel.addPanel("Sliderbar", SliderBarPanel(self))
#self.tabPanel.addPanel("Menu", MenuPanel(self))
#self.tabPanel.addPanel("CaptionBar", CaptionBarPanel(self))
self.tabPanel.addPanel("Scroll", ScrollPanel(self))
self.tabPanel.addPanel("Dropdown", DropdownPanel(self))
self.tabPanel.addPanel("RadioButton", RadioPanel(self))
self.tabPanel.addPanel("Splitter", SplitterPanel(self))
self.mainFrame.replacePanel(self.tabPanel)
self.mainFrame.pack()
#########################################################################################################
##
#########################################################################################################
def setupLogFrame(self):
flags = (pyui2.widgets.Frame.NO_CAPTION, pyui2.widgets.Frame.NO_RESIZE)
self.logFrame = pyui2.widgets.Frame(0, self.mbar.height + 400, 800, 200 - self.mbar.height, "", flags)
self.logFrame.setLayout(pyui2.layouts.TableLayoutManager(21, 13))
self.logList = pyui2.widgets.ListBox()
self.logFrame.addChild(self.logList, (0, 0, 21, 10))
self.logFrame.addChild(pyui2.widgets.Button("Clear Log", self.onClearLog), (19, 11, 2, 2))
self.logFrame.pack()
#########################################################################################################
##
#########################################################################################################
    def onClearLog(self, menuitem):
self.logList.clearAllItems()
#########################################################################################################
##
#########################################################################################################
def addToLog(self, text):
        self.logList.addItem(text, None)
#########################################################################################################
##
#########################################################################################################
def onThemeChange(self, menuitem):
self.currentTheme = menuitem.text
pyui2.desktop.setTheme(self.themes[menuitem.text]())
#########################################################################################################
##
#########################################################################################################
def onExit(self, menuitem):
pyui2.quit()
#########################################################################################################
##
#########################################################################################################
def setupMenu(self):
fileMenu = pyui2.widgets.Menu("File")
fileMenu.addItem("Exit", self.onExit)
themeMenu = pyui2.widgets.Menu("Themes")
for item in self.themes:
themeMenu.addItem(item, self.onThemeChange)
self.mbar = pyui2.widgets.MenuBar()
self.mbar.addMenu(fileMenu)
self.mbar.addMenu(themeMenu)
#########################################################################################################
##
#########################################################################################################
def run(self):
self.setupMenu()
self.setupMainFrame()
self.setupLogFrame()
pyui2.run()
pyui2.quit()
#############################################################################################################
if __name__ == '__main__':
app = WidgetDemo()
app.run()
|
AaronWatters/inferelator_strawman
|
inferelator_strawman/design_response_R.py
|
Python
|
bsd-2-clause
| 3,334
| 0.003599
|
"""
Compute design and response by calling R subprocess.
"""
import os
import subprocess
import pandas as pd
my_dir = os.path.dirname(__file__)
R_dir = os.path.join(my_dir, "R_code")
DR_module = os.path.join(R_dir, "design_and_response.R")
R_template = r"""
source('{module}')
meta.data <- read.table('{meta_file}', sep = ',', header = 1, row.names = 1)
exp.mat <- read.table('{exp_file}', sep = ',', header = 1, row.names = 1)
delT.min <- {delTmin}
delT.max <- {delTmax}
tau <- {tau}
dr <- design.and.response(meta.data, exp.mat, delT.min, delT.max, tau)
#dr$final_response_matrix
#dr$final_design_matrix
write.table(as.matrix(dr$final_response_matrix), '{response_file}', sep = '\t')
write.table(as.matrix(dr$final_design_matrix), '{design_file}', sep = '\t')
cat("done. \n")
"""
def save_R_driver(to_filename, delTmin=0, delTmax=110, tau=45,
meta_file="meta_data.csv", exp_file="exp_mat.csv",
                  module=DR_module, response_file='response.tsv', design_file='design.tsv'):
assert os.path.exists(DR_module), "doesn't exist " + repr(DR_module)
    text = R_template.format(delTmin=delTmin, delTmax=delTmax, tau=tau,
meta_file=meta_file, exp_file=exp_file, module=module,
response_file=response_file, design_file=design_file)
with open(to_filename, "w") as outfile:
outfile.write(text)
return (to_filename, design_file, response_file)
def convert_to_R_df(df):
"""
Convert booleans to "TRUE" and "FALSE" so they will be read correctly from CSV
format by R.
"""
new_df = pd.DataFrame(df)
for col in new_df:
if new_df[col].dtype == 'bool':
new_df[col] = [str(x).upper() for x in new_df[col]]
return new_df
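# Illustration (added, not in the original module): convert_to_R_df(
#     pd.DataFrame({'flag': [True, False]}))
# returns the 'flag' column as the strings ['TRUE', 'FALSE'], which R's
# read.table() can parse back into logicals.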
class DR_driver:
"""
Configurable container for calling R subprocess to
compute design and response.
"""
target_directory = "/tmp"
meta_file = "meta_data.csv"
exp_file = "exp_mat.csv"
script_file = "run_design_response.R"
response_file = "response.tsv"
design_file = "design.tsv"
delTmin = 0
delTmax = 110
tau = 45
def path(self, filename):
return os.path.join(self.target_directory, filename)
def run(self, expression_data_frame, metadata_dataframe):
exp = convert_to_R_df(expression_data_frame)
md = convert_to_R_df(metadata_dataframe)
exp.to_csv(self.path(self.exp_file))
md.to_csv(self.path(self.meta_file))
(driver_path, design_path, response_path) = save_R_driver(
to_filename=self.path(self.script_file),
delTmin=self.delTmin,
delTmax=self.delTmax,
tau=self.tau,
meta_file=self.path(self.meta_file),
exp_file=self.path(self.exp_file),
response_file=self.path(self.response_file),
design_file=self.path(self.design_file)
)
#subprocess.call(['R', '-f', driver_path])
command = "R -f " + driver_path
stdout = subprocess.check_output(command, shell=True)
assert stdout.strip().split()[-2:] == [b"done.", b">"], (
"bad stdout tail: " + repr(stdout.strip().split()[-2:])
)
final_design = pd.read_csv(design_path, sep='\t')
final_response = pd.read_csv(response_path, sep='\t')
return (final_design, final_response)
|
axelleonhart/TrainingDjango
|
materiales/apps/contratos/migrations/0003_auto_20170330_1333.py
|
Python
|
lgpl-3.0
| 432
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 19:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contratos', '0002_auto_20170330_1328'),
]
operations = [
migrations.RenameField(
model_name='contrato',
old_name='proveedor',
new_name='rfc',
        ),
    ]
|
|
jguelat/BirdChooser
|
bird_chooser_dialog.py
|
Python
|
gpl-2.0
| 3,907
| 0.003327
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
BirdChooserDialog
A QGIS plugin
Show bird observations
-------------------
begin : 2015-11-05
git sha : $Format:%H$
copyright : (C) 2015 by Jerome
email : jerome@guelat.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
import psycopg2
from PyQt4 import QtGui, uic
from qgis.core import QgsDataSourceURI, QgsVectorLayer, QgsMapLayerRegistry, QgsMarkerSymbolV2, QgsMessageLog
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'bird_chooser_dialog_base.ui'))
class BirdChooserDialog(QtGui.QDialog, FORM_CLASS):
def __init__(self, iface, parent=None):
"""Constructor."""
super(BirdChooserDialog, self).__init__(parent)
# Set up the user interface from Designer.
# After setupUI you can access any designer object by doing
# self.<objectname>, and you can use autoconnect slots - see
# http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
|
# #widgets-and-dialogs-with-auto-connect
self.setupUi(self)
self.iface = iface
        # Connect the slots
self._connectSlots()
#self.conn = psycopg2.connect(database = "jguelat", user = "jguelat", password = "")
self.conn = psycopg2.connect(service = "local_jguelat")
def _connectSlots(self):
self.tableCombo.activated.connect(self.getSpecies)
        # When the window is closed (in any way)
self.finished.connect(self.closeConnection)
self.addLayerButton.clicked.connect(self.addLayer)
def getSpecies(self):
self.speciesCombo.clear()
cur = self.conn.cursor()
cur.execute("SELECT DISTINCT species_id from " + self.tableCombo.currentText() + " ORDER BY species_id")
rows = cur.fetchall()
self.speciesCombo.addItems([str(elem[0]) for elem in rows])
self.addLayerButton.setEnabled(True)
cur.close()
def addLayer(self):
uri = QgsDataSourceURI()
# set host name, port, database name, username and password
#uri.setConnection("localhost", "5432", "jguelat", "jguelat", "")
uri.setConnection("local_jguelat", "", "", "")
# set database schema, table name, geometry column and optionally subset (WHERE clause)
uri.setDataSource("public", self.tableCombo.currentText(), "geom", "species_id = " + self.speciesCombo.currentText())
#vlayer = self.iface.addVectorLayer(uri.uri(), "Species " + self.speciesCombo.currentText(), "postgres")
vlayer = QgsVectorLayer(uri.uri(), "Species " + self.speciesCombo.currentText(), "postgres")
props = vlayer.rendererV2().symbol().symbolLayer(0).properties()
props['size'] = '3'
props['color'] = 'blue'
vlayer.rendererV2().setSymbol(QgsMarkerSymbolV2.createSimple(props))
QgsMapLayerRegistry.instance().addMapLayer(vlayer)
QgsMessageLog.logMessage("Tout est OK", 'BirdChooser', QgsMessageLog.INFO)
def closeConnection(self):
self.conn.close()
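    # --- Illustrative sketch, not part of the original plugin ----------------
    # getSpecies() and addLayer() above build their SQL and filter strings by
    # concatenating combo-box text directly. A hedged alternative, assuming a
    # psycopg2 version that ships the psycopg2.sql module, composes the table
    # identifier safely; the method name and arguments are hypothetical.
    def _example_species_query(self, table_name):
        from psycopg2 import sql
        cur = self.conn.cursor()
        query = sql.SQL("SELECT DISTINCT species_id FROM {} ORDER BY species_id").format(
            sql.Identifier(table_name))
        cur.execute(query)
        rows = cur.fetchall()
        cur.close()
        return [row[0] for row in rows]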
|
punchagan/zulip
|
zerver/tests/test_custom_profile_data.py
|
Python
|
apache-2.0
| 35,581
| 0.002108
|
from typing import Any, Dict, List, Union
from unittest import mock
import orjson
from zerver.lib.actions import (
do_remove_realm_custom_profile_field,
do_update_user_custom_profile_data_if_changed,
try_add_realm_custom_profile_field,
try_reorder_realm_custom_profile_fields,
)
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.markdown import markdown_convert
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import queries_captured
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
custom_profile_fields_for_realm,
    get_realm,
)
class CustomProfileFieldTestCase(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
        self.realm = get_realm("zulip")
self.original_count = len(custom_profile_fields_for_realm(self.realm.id))
def custom_field_exists_in_realm(self, field_id: int) -> bool:
fields = custom_profile_fields_for_realm(self.realm.id)
field_ids = [field.id for field in fields]
return field_id in field_ids
class CreateCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_create(self) -> None:
self.login("iago")
realm = get_realm("zulip")
data: Dict[str, Any] = {"name": "Phone", "field_type": "text id"}
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'Argument "field_type" is not valid JSON.')
data["name"] = ""
data["field_type"] = 100
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Label cannot be blank.")
data["name"] = "*" * 41
data["field_type"] = 100
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "name is too long (limit: 40 characters)")
data["name"] = "Phone"
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Invalid field type.")
data["name"] = "Phone"
data["hint"] = "*" * 81
data["field_type"] = CustomProfileField.SHORT_TEXT
result = self.client_post("/json/realm/profile_fields", info=data)
msg = "hint is too long (limit: 80 characters)"
self.assert_json_error(result, msg)
data["name"] = "Phone"
data["hint"] = "Contact number"
data["field_type"] = CustomProfileField.SHORT_TEXT
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
field = CustomProfileField.objects.get(name="Phone", realm=realm)
self.assertEqual(field.id, field.order)
data["name"] = "Name "
data["hint"] = "Some name"
data["field_type"] = CustomProfileField.SHORT_TEXT
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
field = CustomProfileField.objects.get(name="Name", realm=realm)
self.assertEqual(field.id, field.order)
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "A field with that label already exists.")
def test_create_select_field(self) -> None:
self.login("iago")
data: Dict[str, Union[str, int]] = {}
data["name"] = "Favorite programming language"
data["field_type"] = CustomProfileField.SELECT
data["field_data"] = "invalid"
result = self.client_post("/json/realm/profile_fields", info=data)
error_msg = "Bad value for 'field_data': invalid"
self.assert_json_error(result, error_msg)
data["field_data"] = orjson.dumps(
{
"python": ["1"],
"java": ["2"],
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "field_data is not a dict")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python"},
"java": {"text": "Java"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "order key is missing from field_data")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python", "order": ""},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["order"] cannot be blank.')
data["field_data"] = orjson.dumps(
{
"": {"text": "Python", "order": "1"},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "'value' cannot be blank.")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python", "order": 1},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["order"] is not a string')
data["field_data"] = orjson.dumps({}).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Field must have at least one choice.")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python", "order": "1"},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
def test_create_default_external_account_field(self) -> None:
self.login("iago")
realm = get_realm("zulip")
field_type: int = CustomProfileField.EXTERNAL_ACCOUNT
field_data: str = orjson.dumps(
{
"subtype": "twitter",
}
).decode()
invalid_field_name: str = "Not required field name"
invalid_field_hint: str = "Not required field hint"
result = self.client_post(
"/json/realm/profile_fields",
info=dict(
field_type=field_type,
field_data=field_data,
hint=invalid_field_hint,
name=invalid_field_name,
),
)
self.assert_json_success(result)
# Silently overwrite name and hint with values set in default fields dict
# for default custom external account fields.
with self.assertRaises(CustomProfileField.DoesNotExist):
field = CustomProfileField.objects.get(name=invalid_field_name, realm=realm)
# The field is created with 'Twitter' name as per values in default fields dict
field = CustomProfileField.objects.get(name="Twitter")
self.assertEqual(field.name, DEFAULT_EXTERNAL_ACCOUNTS["twitter"]["name"])
self.assertEqual(field.hint, DEFAULT_EXTERNAL_ACCOUNTS["twitter"]["hint"])
result = self.client_delete(f"/json/realm/profile_fields/{field.id}")
self.assert_json_success(result)
# Should also work without name or hint and only external field type and subtype data
result = self.client_post(
"/json/realm/profile_fields", info=dict(field_type=field_type, field_data=field_data)
)
self.assert_json_success(result)
# Default external account field data cannot be updated
field = CustomProfileField.objects.get(name="Twitter", realm=realm)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Twitter username", "field_type": CustomProfileField.EXTERNAL_ACCOUNT},
)
self.assert_json_error(result, "Default custom field cannot be updated.")
result = self.client_delete(
|
pombredanne/pythran
|
pythran/tests/openmp.legacy/omp_parallel_for_lastprivate.py
|
Python
|
bsd-3-clause
| 279
| 0.003584
|
def omp_parallel_for_lastprivate():
sum = 0
i0 = -1
'omp parallel for reduction(+:sum) schedule(static,7) lastprivate(i0)'
for i in range(1,1001):
sum += i
i0 = i
|
known_sum = (1000 * (1000 + 1)) / 2
    return known_sum == sum and i0 == 1000
|
jonathanslenders/pyvim
|
pyvim/commands/completer.py
|
Python
|
bsd-3-clause
| 1,985
| 0.001008
|
from __future__ import unicode_literals
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.completion import WordCompleter, PathCompleter
from prompt_toolkit.contrib.completers.system import SystemCompleter
from prompt_toolkit.contrib.regular_languages.completion import GrammarCompleter
from .grammar import COMMAND_GRAMMAR
from .commands import get_commands, SET_COMMANDS
__all__ = (
'create_command_completer',
)
def create_command_completer(editor):
|
commands = [c + ' ' for c in get_commands()]
return GrammarCompleter(COMMAND_GRAMMAR, {
'command': WordCompleter(commands),
'location': PathCompleter(expanduser=True),
'set_option': WordCompleter(sorted(SET_COMMANDS)),
'buffer_name': BufferNameCompleter(editor),
'colorscheme': ColorSchemeCompleter(editor),
'shell_command': SystemCompleter(),
})
class BufferNameCompleter(Completer):
"""
Complete on buffer names.
    It is sufficient when the input appears anywhere in the buffer name, to
trigger a completion.
"""
def __init__(self, editor):
self.editor = editor
def get_completions(self, document, complete_event):
text = document.text_before_cursor
for eb in self.editor.window_arrangement.editor_buffers:
location = eb.location
if location is not None and text in location:
yield Completion(location, start_position=-len(text), display=location)
class ColorSchemeCompleter(Completer):
"""
Complete on the names of the color schemes that are currently known to the
Editor instance.
"""
def __init__(self, editor):
self.editor = editor
def get_completions(self, document, complete_event):
text = document.text_before_cursor
for style_name in self.editor.styles:
if style_name.startswith(text):
yield Completion(style_name[len(text):], display=style_name)
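# --- Illustrative sketch, not part of pyvim ---------------------------------
# The two completers above follow the standard prompt_toolkit pattern:
# subclass Completer and yield Completion objects from get_completions().
# A minimal standalone variant over a fixed, hypothetical word list:
class _ExampleWordCompleter(Completer):
    def __init__(self, words):
        self.words = words

    def get_completions(self, document, complete_event):
        text = document.text_before_cursor
        for word in self.words:
            if word.startswith(text):
                yield Completion(word[len(text):], display=word)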
|
cloudfoundry-incubator/bosh-vsphere-cpi-release
|
scripts/pyvmomi_to_ruby/gen_server_objects.py
|
Python
|
apache-2.0
| 219
| 0.013699
|
#!/usr/bin/env python
print("""# ******* WARNING - AUTO GENERATED CODE - DO NOT EDIT *******
module VimSdk
module VmomiSupport
""
|
")
import ServerObjects
import PbmObjects
import SmsObjects
print(""" en
|
d
end
""")
|
Trophime/singularity
|
libexec/python/tests/test_json.py
|
Python
|
bsd-3-clause
| 9,089
| 0.022335
|
'''
test_json.py: Singularity Hub testing functions for Singularity in Python
Copyright (c) 2016-2017, Vanessa Sochat. All rights reserved.
"Singularity" Copyright (c) 2016, The Regents of the University of California,
through Lawrence Berkeley National Laboratory (subject to receipt of any
required approvals from the U.S. Dept. of Energy). All rights reserved.
This software is licensed under a customized 3-clause BSD license. Please
consult LICENSE file distributed with the sources of this project regarding
your rights to use or distribute this software.
NOTICE. This Software was developed under funding from the U.S. Department of
Energy and the U.S. Government consequently retains certain rights. As such,
the U.S. Government has been granted for itself and others acting on its
behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software
to reproduce, distribute copies to the public, prepare derivative works, and
perform publicly and display publicly, and to permit other to do so.
'''
import os
import sys
sys.path.append('..') # directory with helpers
from unittest import TestCase
import shutil
import tempfile
from subprocess import (
Popen,
PIPE,
STDOUT
)
VERSION = sys.version_info[0]
print("*** PYTHON VERSION %s UTIL HELPERS TESTING START ***" %(VERSION))
class TestJson(TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.here = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
self.file = "%s/meatballs.json" %(self.tmpdir)
print("\n---START----------------------------------------")
def tearDown(self):
shutil.rmtree(self.tmpdir)
print("---END------------------------------------------")
def format_keyname(self):
'''test the function to format the key name
'''
from helpers.json.main import format_keyname
print("Testing formatting of key function.")
print('Case 1: Testing that key returns all caps')
key = format_keyname('dry_meatball')
self.assertEqual(key,'DRY_MEATBALL')
print('Case 2: Testing that key replaced invalid characters with _')
key = format_keyname('big!!meatball)#$FTW')
self.assertEqual(key,'DRY_MEATBALL_FTW')
key = format_keyname('ugly-meatball')
self.assertEqual(key,'UGLY_MEATBALL')
def test_get(self):
'''test_get will test the get function
'''
print('Testing json GET')
print('Case 1: Get exiting key')
from sutils import write_json
write_json({"PASTA":"rigatoni!"},self.file)
self.assertTrue(os.path.exists(self.file))
script_path = "%s/helpers/json/get.py" %(self.here)
if VERSION == 2:
testing_command = ["python2",script_path,'--key','PASTA','--file',self.file]
else:
testing_command = ["python3",script_path,'--key','PASTA','--file',self.file]
output = Popen(testing_command,stderr=PIPE,stdout=PIPE)
t = output.communicate()[0],output.returncode
result = {'message':t[0],
'return_code':t[1]}
self.assertEqual(result['return_code'],0)
        output = result['message']
if isinstance(output,bytes):
output = output.decode(encoding='UTF-8')
self.assertEqual('rigatoni!',output.strip('\n').split('\n')[-1])
print('Case 2: Get non-existing key exits')
if VERSION == 2:
            testing_command = ["python2",script_path,'--key','LASAGNA','--file',self.file]
else:
testing_command = ["python3",script_path,'--key','LASAGNA','--file',self.file]
output = Popen(testing_command,stderr=PIPE,stdout=PIPE)
t = output.communicate()[0],output.returncode
result = {'message':t[0],
'return_code':t[1]}
self.assertEqual(result['return_code'],1)
def test_add_delete(self):
'''test_add_delete will test the add and delete functions
'''
print('Testing json ADD')
from sutils import write_json, read_json
print('Case 1: Adding to new file, force not needed')
self.assertTrue(os.path.exists(self.file)==False)
script_path = "%s/helpers/json/add.py" %(self.here)
if VERSION == 2:
testing_command = ["python2",script_path,'--key','LEGO','--value','RED','--file',self.file]
else:
testing_command = ["python3",script_path,'--key','LEGO','--value','RED','--file',self.file]
output = Popen(testing_command,stderr=PIPE,stdout=PIPE)
t = output.communicate()[0],output.returncode
result = {'message':t[0],
'return_code':t[1]}
self.assertEqual(result['return_code'],0)
self.assertTrue(os.path.exists(self.file))
# Check the contents
contents = read_json(self.file)
self.assertTrue('LEGO' in contents)
self.assertTrue(contents['LEGO'] == 'RED')
print('Case 2: Adding to existing key without force should error.')
output = Popen(testing_command,stderr=PIPE,stdout=PIPE)
t = output.communicate()[0],output.returncode
result = {'message':t[0],
'return_code':t[1]}
self.assertEqual(result['return_code'],1)
print('Case 3: Adding to existing key with force should work.')
if VERSION == 2:
testing_command = ["python2",script_path,'--key','LEGO','--value','BLUE','--file',self.file,'-f']
else:
testing_command = ["python3",script_path,'--key','LEGO','--value','BLUE','--file',self.file,'-f']
output = Popen(testing_command,stderr=PIPE,stdout=PIPE)
t = output.communicate()[0],output.returncode
result = {'message':t[0],
'return_code':t[1]}
self.assertEqual(result['return_code'],0)
# Check the updated contents
contents = read_json(self.file)
self.assertTrue('LEGO' in contents)
self.assertTrue(contents['LEGO'] == 'BLUE')
if VERSION == 2:
testing_command = ["python2",script_path,'--key','PASTA','--value','rigatoni!','--file',self.file]
else:
testing_command = ["python3",script_path,'--key','PASTA','--value','rigatoni!','--file',self.file]
output = Popen(testing_command,stderr=PIPE,stdout=PIPE)
t = output.communicate()[0],output.returncode
result = {'message':t[0],
'return_code':t[1]}
print('Case 4: Deleting key from file')
script_path = "%s/helpers/json/delete.py" %(self.here)
if VERSION == 2:
testing_command = ["python2",script_path,'--key','LEGO','--file',self.file]
else:
testing_command = ["python3",script_path,'--key','LEGO','--file',self.file]
output = Popen(testing_command,stderr=PIPE,stdout=PIPE)
t = output.communicate()[0],output.returncode
result = {'message':t[0],
'return_code':t[1]}
self.assertEqual(result['return_code'],0)
# Check the key was deleted contents
contents = read_json(self.file)
self.assertTrue('LEGO' not in contents)
print('Case 5: Checking that empty file is removed.')
if VERSION == 2:
testing_command = ["python2",script_path,'--key','PASTA','--file',self.file]
else:
testing_command = ["python3",script_path,'--key','PASTA','--file',self.file]
output = Popen(testing_command,stderr=PIPE,stdout=PIPE)
t = output.communicate()[0],output.returncode
result = {'message':t[0],
'return_code':t[1]}
self.assertTrue(os.path.exists(self.file)==False)
def test_dump(self):
'''test_add_delete will test the add and delete functions
'''
print('Testing json DUMP')
from sutils import write_json, read_json
print('Case 1: Dumping file.')
jsondump = {'HELLO':'KITTY',
'BATZ':'MARU',
'MY':'MELODY' }
write_json(jsondump,self.file
|
lucastheis/c2s
|
scripts/c2s-leave-one-out.py
|
Python
|
mit
| 3,513
| 0.022773
|
#!/usr/bin/env python
"""
Measure the performance of STM based spike prediction by repeatedly
using all but one cell for training and the remaining cell for testing.
"""
import os
import sys
from argparse import ArgumentParser
from pickle import dump
from scipy.io import savemat
from numpy import mean, std, corrcoef, sqrt, unique
from numpy.random import rand, randint
from cmt.utils import random_select
from c2s import load_data, train, predict, preprocess
from c2s.experiment import Experiment
from c2s.utils import convert
def main(argv):
parser = ArgumentParser(argv[0], description=__doc__)
parser.add_argument('dataset', type=str, nargs='+')
parser.add_argument('output', type=str)
parser.add_argument('--num_components', '-c', type=int, default=3)
parser.add_argument('--num_features', '-f', type=int, default=2)
parser.add_argument('--num_models', '-m', type=int, default=4)
parser.add_argument('--keep_all', '-k', type=int, default=1)
parser.add_argument('--finetune', '-n', type=int, default=0)
parser.add_argument('--num_valid', '-s', type=int, default=0)
parser.add_argument('--var_explained', '-e', type=float, default=95.)
parser.add_argument('--window_length', '-w', type=float, default=1000.)
parser.add_argument('--regularize', '-r', type=float, default=0.)
parser.add_argument('--preprocess', '-p', type=int, default=0)
parser.add_argument('--verbosity', '-v', type=int, default=1)
args, _ = parser.parse_known_args(argv[1:])
experiment = Experiment()
# load data
data = []
for dataset in args.dataset:
data = data + load_data(dataset)
# preprocess data
if args.preprocess:
data = preprocess(data)
# list of all cells
if 'cell_num' in data[0]:
# several trials/entries may belong to the same cell
cells = unique([entry['cell_num'] for entry in data])
else:
# one cell corresponds to one trial/entry
cells = range(len(data))
for i in cells:
data[i]['cell_num'] = i
for i in cells:
data_train = [entry for entry in data if entry['cell_num'] != i]
data_test = [entry for entry in data if entry['cell_num'] == i]
if args.verbosity > 0:
print 'Test cell: {0}'.format(i)
# train on all cells but cell i
results = train(
data=data_train,
num_valid=args.num_valid,
num_models=args.num_models,
var_explained=args.var_explained,
window_length=args.window_length,
keep_all=args.keep_all,
finetune=args.finetune,
model_parameters={
'num_components': args.num_components,
'num_features': args.num_features},
training_parameters={
'verbosity': 0},
regularize=args.regularize,
verbosity=1)
if args.verbosity > 0:
print 'Predicting...'
# predict responses of cell i
predictions = predict(data_test, results, verbosity=0)
for entry1, entry2 in zip(data_test, predictions):
entry1['predictions'] = entry2['predictions']
# remove data except predictions
for entry in data:
if 'spikes' in entry:
del entry['spikes']
if 'spike_times' in entry:
del entry['spike_times']
|
del entry['calcium']
# save results
if args.output.lower().endswith('.mat'):
savemat(args.output, convert({'data': data}))
elif args.output.lower().endswith('.xpck'):
experiment['args'] = args
experiment['data'] = data
experiment.save(args.output)
else:
with open(args.output, 'w') as handle:
dump(data, handle, protocol=2)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
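# --- Illustrative sketch, not part of the original script -------------------
# The main loop above performs a leave-one-out split over cells: train on all
# entries whose 'cell_num' differs from the held-out cell, test on the rest.
# The same split factored into a hedged standalone helper (name hypothetical),
# reusing the numpy `unique` import at the top of this script:
def _leave_one_out_splits(data):
    cells = unique([entry['cell_num'] for entry in data])
    for held_out in cells:
        train_set = [entry for entry in data if entry['cell_num'] != held_out]
        test_set = [entry for entry in data if entry['cell_num'] == held_out]
        yield held_out, train_set, test_set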
|
Alecto3-D/testable-greeter
|
bb-master/sandbox/lib/python3.5/site-packages/autobahn/wamp/types.py
|
Python
|
mit
| 36,554
| 0.003146
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import six
from autobahn.util import public
from autobahn.wamp.request import Subscription, Registration
__all__ = (
'ComponentConfig',
'HelloReturn',
'Accept',
'Deny',
'Challenge',
'HelloDetails',
'SessionDetails',
'CloseDetails',
'SubscribeOptions',
'EventDetails',
'PublishOptions',
'RegisterOptions',
'CallDetails',
'CallOptions',
'CallResult',
'EncodedPayload'
)
@public
class ComponentConfig(object):
"""
WAMP application component configuration. An instance of this class is
provided to the constructor of :class:`autobahn.wamp.protocol.ApplicationSession`.
"""
__slots__ = (
'realm',
'extra',
'keyring',
'controller',
'shared'
)
def __init__(self, realm=None, extra=None, keyring=None, controller=None, shared=None):
"""
:param realm: The realm the session would like to join or ``None`` to let the router
auto-decide the realm (if the router is configured and allowing to do so).
:type realm: str
:param extra: Optional user-supplied object with extra configuration.
This can be any object you like, and is accessible in your
`ApplicationSession` subclass via `self.config.extra`. `dict` is
a good default choice. Important: if the component is to be hosted
by Crossbar.io, the supplied value must be JSON serializable.
:type extra: arbitrary
:param keyring: A mapper from WAMP URIs to "from"/"to" Ed25519 keys. When using
WAMP end-to-end encryption, application payload is encrypted using a
symmetric message key, which in turn is encrypted using the "to" URI (topic being
published to or procedure being called) public key and the "from" URI
private key. In both cases, the key for the longest matching URI is used.
:type keyring: obj implementing IKeyRing or None
:param controller: A WAMP ApplicationSession instance that holds a session to
a controlling entity. This optional feature needs to be supported by a WAMP
component hosting run-time.
:type controller: instance of ApplicationSession or None
:param shared: A dict object to exchange user information or hold user objects shared
between components run under the same controlling entity. This optional feature
needs to be supported by a WAMP component hosting run-time. Use with caution, as using
            this feature can introduce coupling between components. A valid use case would be
to hold a shared database connection pool.
:type shared: dict or None
"""
assert(realm is None or type(realm) == six.text_type)
# assert(keyring is None or ...) # FIXME
self.realm = realm
self.extra = extra
self.keyring = keyring
self.controller = controller
self.shared = shared
    def __str__(self):
return u"ComponentConfig(realm=<{}>, extra={}, keyring={}, controller={}, shared={})".format(self.realm, self.extra, self.keyring, self.controller, self.shared)
@public
class HelloReturn(object):
"""
Base class for ``HELLO`` return information.
"""
@public
class Accept(HelloReturn):
"""
Information to accept a ``HELLO``.
"""
__slots__ = (
'realm',
'authid',
'authrole',
'authmethod',
'authprovider',
'authextra',
)
def __init__(self, realm=None, authid=None, authrole=None, authmethod=None, authprovider=None, authextra=None):
"""
:param realm: The realm the client is joined to.
:type realm: str
:param authid: The authentication ID the client is assigned, e.g. ``"joe"`` or ``"joe@example.com"``.
:type authid: str
:param authrole: The authentication role the client is assigned, e.g. ``"anonymous"``, ``"user"`` or ``"com.myapp.user"``.
:type authrole: str
:param authmethod: The authentication method that was used to authenticate the client, e.g. ``"cookie"`` or ``"wampcra"``.
:type authmethod: str
:param authprovider: The authentication provider that was used to authenticate the client, e.g. ``"mozilla-persona"``.
:type authprovider: str
:param authextra: Application-specific authextra to be forwarded to the client in `WELCOME.details.authextra`.
:type authextra: dict
"""
assert(realm is None or type(realm) == six.text_type)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
assert(authprovider is None or type(authprovider) == six.text_type)
assert(authextra is None or type(authextra) == dict)
self.realm = realm
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
self.authprovider = authprovider
self.authextra = authextra
def __str__(self):
return u"Accept(realm=<{}>, authid=<{}>, authrole=<{}>, authmethod={}, authprovider={}, authextra={})".format(self.realm, self.authid, self.authrole, self.authmethod, self.authprovider, self.authextra)
@public
class Deny(HelloReturn):
"""
Information to deny a ``HELLO``.
"""
__slots__ = (
'reason',
'message',
)
def __init__(self, reason=u'wamp.error.not_authorized', message=None):
"""
:param reason: The reason of denying the authentication (an URI, e.g. ``u'wamp.error.not_authorized'``)
:type reason: str
:param message: A human readable message (for logging purposes).
:type message: str
"""
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
self.reason = reason
self.message = message
def __str__(self):
return u"Deny(reason=<{}>, message='{}')".format(self.reason, self.message)
@public
class Challenge(HelloReturn):
"""
Information to challenge the client upon ``HELLO``.
"""
__slots__ = (
'method',
'extra',
)
def __init__(self, method, extra=None):
"""
:param method: The authentication method for the challenge (e.g. ``"wampcra"``).
:type method: str
:param extra: Any extra information for the authentication challenge. This is
specific to the authentication method.
:type extra: dict
"""
assert(type(method) == six.text_type)
assert(extra is None or type(extra) == dict)
self.method = method
self.extra = extra or {}
def __str__(s
|
oldani/nanodegree-blog
|
app/models/comment.py
|
Python
|
mit
| 422
| 0
|
from datetime import datetime
from .base import BaseModel
class Comment(BaseModel):
def __init__(self, **kwargs):
# Add created and updated attrs by default.
|
self.created = self.updated = datetime.now()
        super().__init__(**kwargs)
def update(self):
""" Extends update method to update some fields before saving. """
self.updated = datetime.now()
super().update()
|
jaredmcqueen/hot-wheels-radar-gun
|
getSerialData.py
|
Python
|
mit
| 779
| 0
|
import serial
import time
import datetime
import csv
ser = serial.Serial('/dev/cu.usbmodem1411', 9600, timeout=0)
my_file = 'data.csv'
f = open(my_file, 'wt')
writer = csv.writer(f)
writer.writerow(('dateTime', 'analogValue')) # csv column headers
while True:
try:
# write a csv row
writer.writerow((datetime.datetime.now(), ser.readline().strip()))
# print out to screen
# TODO: make the prints only once a second, stdout can't keep up
analog_value = ser.readline().strip()
|
if analog_value:
line = '%s,%s\n' % (datetime.datetime.now(), analog_value)
print line.strip()
time.sleep(.001)
    except serial.SerialTimeoutException:
print('Data could not be read')
time.sleep(1)
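# --- Illustrative sketch, not part of the original script -------------------
# The TODO above notes that stdout cannot keep up with one print per reading.
# A hedged throttling helper (names hypothetical): keep logging every reading
# to the CSV, but print to the console at most once per `interval` seconds.
def _print_throttled(value, state={'last': 0.0}, interval=1.0):
    # the mutable default holds the last print time between calls (deliberate)
    now = time.time()
    if value and now - state['last'] >= interval:
        print('%s,%s' % (datetime.datetime.now(), value))
        state['last'] = now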
|
pferreir/indico-backup
|
indico/MaKaC/plugins/EPayment/yellowPay/__init__.py
|
Python
|
gpl-3.0
| 882
| 0.013605
|
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
__metadata__ = {
'type': "EPayment",
'name': "YellowPay"
}
MODULE_ID = 'YellowPay'
modules = {}
|
moreus/hadoop
|
hadoop-0.11.2/src/contrib/abacus/examples/pyAbacus/JythonAbacus.py
|
Python
|
apache-2.0
| 2,577
| 0.014358
|
#
# Copyright 2006 The Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from org.apache.hadoop.fs import Path
from org.apache.hadoop.io import *
from org.apache.hadoop.mapred import *
from org.apache.hadoop.abacus import *
from java.util import *;
import sys
class AbacusMapper(ValueAggregatorMapper):
def map(self, key, value, output, reporter):
ValueAggregatorMapper.map(self, key, value, output, reporter);
class AbacusReducer(ValueAggregatorReducer):
def reduce(self, key, values, output, reporter):
ValueAggregatorReducer.reduce(self, key, values, output, reporter);
class AbacusCombiner(ValueAggregatorCombiner):
def reduce(self, key, values, output, reporter):
ValueAggregatorCombiner.reduce(self, key, values, output, reporter);
def printUsage(code):
print "Abacus <input> <output> <numOfReducers> <inputformat> <specfile>"
sys.exit(code)
def main(args):
if len(args) < 6:
printUsage(1);
inDir = args[1];
outDir = args[2];
numOfReducers = int(args[3]);
theInputFormat = args[4];
specFile = args[5];
print "numOfReducers: ", numOfReducers, "theInputFormat: ", theInputFormat, "specFile: ", specFile
conf = JobConf(AbacusMapper);
conf.setJobName("recordcount");
conf.addDefaultResource(Path(specFile));
if theInputFormat=="textinputformat":
conf.setInputFormat(TextInputFormat);
else:
conf.setInputFormat(SequenceFileInputFormat);
conf.setOutputFormat(TextOutputFormat);
conf.setMapOutputKeyClass(Text);
conf.setMapOutputValueClass(Text);
conf.setOutputKeyClass(Text);
conf.setOutputValueClass(Text);
conf.setNumMapTasks(1);
conf.setNumReduceTasks(numOfReducers);
conf.setMapperClass(AbacusMapper);
conf.setCombinerClass(AbacusCombiner);
conf.setReducerClass(AbacusReducer);
conf.setInputPath(Path(args[1]))
conf.setOutputPath(Path(args[2]))
JobClient.runJob(conf);
if __name__ == "__main__":
main(sys.argv)
|
pombredanne/mopidy-webhooks
|
mopidy_webhooks/__init__.py
|
Python
|
apache-2.0
| 877
| 0
|
from __future__ import unicode_literals
import logging
import os
from mopidy import config, ext
__version__ = '0.3.0'
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
dist_name = 'Mopidy-Webhooks'
ext_name = 'webhooks'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
        return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['api_key'] = config.String()
schema['api_key_header_name'] = config.String()
schema['status_update_interval'] = config.Integer()
schema['webhook_url'] = config.String()
return schema
def setup(self, registry):
from .frontend import WebhookFrontend
registry.add('frontend', WebhookFrontend)
|
zachary-williamson/ITK
|
Modules/ThirdParty/pygccxml/src/pygccxml/parser/etree_scanner.py
|
Python
|
apache-2.0
| 1,759
| 0.000569
|
# Copyright 2014-2015 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
from . import scanner
# keep py2exe happy
import xml.etree.ElementTree
import xml.etree.cElementTree as ElementTree
class etree_saxifier_t(object):
def __init__(self, etree, handler):
self.__root_elem = etree.getroot()
self.__handler = handler
def saxify(self):
self.__handler.startDocument()
self.__recursive_saxify(self.__root_elem)
self.__handler.endDocument()
def __recursive_saxify(self, element):
self.__handler.startElement(element.tag, element.attrib)
for e in element:
self.__recursive_saxify(e)
self.__handler.endElement(element.tag)
class etree_scanner_t(scanner.scanner_t):
def __init__(self, xml_file, decl_factory, *args):
scanner.scanner_t.__init__(self, xml_file, decl_factory, *args)
    def read(self):
tree = ElementTree.parse(self.xml_file)
saxifier = etree_saxifier_t(tree, self)
saxifier.saxify()
class ietree_scanner_t(scanner.scanner_t):
def __init__(self, xml_file, decl_factory, *args):
scanner.scanner_t.__init__(self, xml_file, decl_factory, *args)
def read(self):
context = ElementTree.iterparse(
self.xml_file,
events=(
"start",
"end"))
        for event, elem in context:
if event == 'start':
self.startElement(elem.tag, elem.attrib)
else:
self.endElement(elem.tag)
elem.clear()
self.endDocument()
etree_scanner_t = ietree_scanner_t
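# --- Illustrative sketch, not part of pygccxml -------------------------------
# ietree_scanner_t above streams the XML file with iterparse and clears each
# element once its end event has been handled, keeping memory flat on large
# files. The same pattern as a hedged standalone helper (name hypothetical),
# reusing the ElementTree import at the top of this module:
def _example_streaming_parse(xml_path, on_start, on_end):
    context = ElementTree.iterparse(xml_path, events=("start", "end"))
    for event, elem in context:
        if event == "start":
            on_start(elem.tag, elem.attrib)
        else:
            on_end(elem.tag)
            elem.clear()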
|
v-iam/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/ip_configuration.py
|
Python
|
mit
| 2,982
| 0.001677
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class IPConfiguration(SubResource):
"""IPConfiguration.
:param id: Resource ID.
:type id: str
:param private_ip_address: The private IP address of the IP configuration.
:type private_ip_address: str
:param private_ip_allocation_method: The private IP allocation method.
Possible values are 'Static' and 'Dynamic'. Possible values include:
'Static', 'Dynamic'
:type private_ip_allocation_method: str or :class:`IPAllocationMethod
<azure.mgmt.network.v2016_12_01.models.IPAllocationMethod>`
:param subnet: The reference of the subnet resource.
:type subnet: :class:`Subnet
<azure.mgmt.network.v2016_12_01.models.Subnet>`
:param public_ip_address: The reference of the public IP resource.
:type public_ip_address: :class:`PublicIPAddress
<azure.mgmt.network.v2016_12_01.models.PublicIPAddress>`
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
    def __init__(self, id=None, private_ip_address=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state=None, name=None, etag=None):
super(IPConfiguration, self).__init__(id=id)
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.subnet = subnet
self.public_ip_address = public_ip_address
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
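# --- Illustrative sketch, not part of the generated SDK code ----------------
# Constructing the model directly, e.g. for a statically allocated private
# address; every value below is hypothetical.
def _example_ip_configuration():
    return IPConfiguration(
        name='ipconfig1',
        private_ip_address='10.0.0.4',
        private_ip_allocation_method='Static',
    )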
|
loopingz/nuxeo-drive
|
nuxeo-drive-client/nxdrive/updater.py
|
Python
|
lgpl-2.1
| 17,952
| 0.001337
|
"""Application update utilities using esky"""
import sys
import errno
import json
from urlparse import urljoin
from urllib2 import URLError
from urllib2 import HTTPError
import socket
from esky import Esky
from esky.errors import EskyBrokenError
from nxdrive.logging_config import get_logger
from nxdrive.engine.workers import PollWorker
from nxdrive.engine.activity import Action
from nxdrive.commandline import DEFAULT_UPDATE_CHECK_DELAY
from nxdrive.utils import version_compare
from PyQt4 import QtCore
log = get_logger(__name__)
# Update statuses
UPDATE_STATUS_UPGRADE_NEEDED = 'upgrade_needed'
UPDATE_STATUS_DOWNGRADE_NEEDED = 'downgrade_needed'
UPDATE_STATUS_UPDATE_AVAILABLE = 'update_available'
UPDATE_STATUS_UPDATING = 'updating'
UPDATE_STATUS_UP_TO_DATE = 'up_to_date'
UPDATE_STATUS_UNAVAILABLE_SITE = 'unavailable_site'
UPDATE_STATUS_MISSING_INFO = 'missing_info'
UPDATE_STATUS_MISSING_VERSION = 'missing_version'
DEFAULT_SERVER_MIN_VERSION = '5.6'
class UnavailableUpdateSite(Exception):
pass
class MissingUpdateSiteInfo(Exception):
pass
class MissingCompatibleVersion(Exception):
pass
class UpdateError(Exception):
pass
class RootPrivilegeRequired(Exception):
pass
class AppUpdater(PollWorker):
"""Class for updating a frozen application.
Basically an Esky wrapper.
"""
refreshStatus = QtCore.pyqtSignal()
_doUpdate = QtCore.pyqtSignal(str)
appUpdated = QtCore.pyqtSignal(str)
updateAvailable = QtCore.pyqtSignal()
def __init__(self, manager, version_finder=None, check_interval=DEFAULT_UPDATE_CHECK_DELAY,
esky_app=None, local_update_site=False):
super(AppUpdater, self).__init__(check_interval)
self.refreshStatus.connect(self._poll)
self._doUpdate.connect(self._update)
self._manager = manager
self._enable = False
if esky_app is not None:
self.esky_app = esky_app
self._enable = True
elif not hasattr(sys, 'frozen'):
log.debug("Application is not frozen, cannot build Esky"
" instance, as a consequence update features"
" won't be available")
elif version_finder is None:
log.debug("Cannot initialize Esky instance with no"
" version finder, as a consequence update"
" features won't be available")
else:
try:
executable = sys.executable
log.debug("Application is frozen, building Esky instance from"
" executable %s and version finder %s",
executable, version_finder)
self.esky_app = Esky(executable, version_finder=version_finder)
self._enable = True
except EskyBrokenError as e:
log.error(e, exc_info=True)
log.debug("Error initializing Esky instance, as a"
" consequence update features won't be"
" available")
self.local_update_site = local_update_site
if self._enable:
self.update_site = self.esky_app.version_finder.download_url
if not self.local_update_site and not self.update_site.endswith('/'):
self.update_site = self.update_site + '/'
self.last_status = (UPDATE_STATUS_UP_TO_DATE, None)
def get_status(self):
return self.last_status
def force_status(self, status, version):
if status == 'updating':
# Put a percentage
self.last_status = (status, version, 40)
else:
self.last_status = (status, version)
def refresh_status(self):
if self._enable:
self.refreshStatus.emit()
@QtCore.pyqtSlot()
def _poll(self):
if self.last_status != UPDATE_STATUS_UPDATING:
# Refresh update site URL
self.set_version_finder(self._manager.get_version_finder(refresh_engines=True))
log.debug('Polling %s for application update, current version is %s', self.update_site,
self._manager.get_version())
status = self._get_update_status()
if status != self.last_status:
self.last_status = status
self._handle_status()
return status != UPDATE_STATUS_UNAVAILABLE_SITE
else:
return True
def _handle_status(self):
update_status = self.last_status[0]
update_version = self.last_status[1]
if update_status == UPDATE_STATUS_UNAVAILABLE_SITE:
# Update site unavailable
log.warning("Update site is unavailable, as a consequence"
" update features won't be available")
elif update_status in [UPDATE_STATUS_MISSING_INFO,
UPDATE_STATUS_MISSING_VERSION]:
# Information or version missing in update site
log.warning("Some information or version file is missing in"
" the update site, as a consequence update"
" features won't be available")
else:
# Update information successfully fetched
log.debug("Fetched information from update site %s: update"
" status = '%s', update version = '%s'",
self.update_site, update_status, update_version)
if update_status in [UPDATE_STATUS_DOWNGRADE_NEEDED, UPDATE_STATUS_UPGRADE_NEEDED]:
# Current client version not compatible with server
# version, upgrade or downgrade needed.
# Let's stop synchronization.
log.info("As current client version is not compatible with"
" server version, an upgrade or downgrade is"
" needed. Synchronization won't start until then.")
self._manager.stop()
elif update_status == UPDATE_STATUS_UPDATE_AVAILABLE and self._manager.get_auto_update():
# Update available and auto-update checked, let's process update
log.info("An application update is available and"
" auto-update is checked")
self.last_status = (UPDATE_STATUS_UPDATING, update_version, 0)
try:
self._update(update_version)
except UpdateError:
log.error("An error occurred while trying to automatically update Nuxeo Drive to version %s,"
" setting 'Auto update' to False", update_version, exc_info=True)
self._manager.set_auto_update(False)
elif update_status == UPDATE_STATUS_UPDATE_AVAILABLE and not self._manager.get_auto_update():
# Update available and auto-update not checked, let's just
# update the systray notification and let the user explicitly choose to update
log.info("An update is available and auto-update is not"
" checked, let's just update the systray notification"
" and let the user explicitly choose to update")
self.updateAvailable.emit()
else:
# Application is up-to-date
log.debug("Application is up-to-date")
def set_version_finder(self, version_finder):
self.esky_app._set_version_finder(version_finder)
self.update_site = self.esky_app.version_finder.download_url
def get_active_version(self):
return self.esky_app.active_version
def get_current_latest_version(self):
return self.esky_app.version
def find_versions(self):
try:
return sorted(self.esky_app.version_finder.find_versions(
self.esky_app), cmp=version_compare)
except URLError as e:
self._handle_URL_error(e)
except socket.timeout as e:
self._handle_timeout_error(e)
def get_server_min_version(self, client_version):
|
ciechowoj/minion
|
output.py
|
Python
|
mit
| 6,974
| 0.004158
|
import sublime, sublime_plugin
def clean_layout(layout):
row_set = set()
col_set = set()
for cell in layout["cells"]:
row_set.add(cell[1])
row_set.add(cell[3])
col_set.add(cell[0])
col_set.add(cell[2])
row_set = sorted(row_set)
col_set = sorted(col_set)
rows = layout["rows"]
cols = layout["cols"]
layout["rows"] = [row for i, row in enumerate(rows) if i in row_set]
layout["cols"] = [col for i, col in enumerate(cols) if i in col_set]
row_map = { row : i for i, row in enumerate(row_set) }
col_map = { col : i for i, col in enumerate(col_set) }
layout["cells"] = [[col_map[cell[0]], row_map[cell[1]], col_map[cell[2]], row_map[cell[3]]] for cell in layout["cells"]]
return layout
def collapse_group(group):
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
window = sublime.active_window()
layout = window.get_layout()
cells = layout["cells"]
new_cells = []
group_cell = cells[group]
cells = cells[:group] + cells[group + 1:]
for cell in cells:
if cell[BOTTOM] == group_cell[TOP] and cell[LEFT] >= group_cell[LEFT] and cell[RIGHT] <= group_cell[RIGHT]:
new_cells.append([
cell[LEFT],
cell[TOP],
cell[RIGHT],
group_cell[BOTTOM]
|
])
elif cell != group_cell:
new_cells.append(cell)
layout["cells"] = new_cells
window.set_layout(clean_layout(layout))
class OutputView:
content = ""
position = 0.0
id = None
def __init__(self, view):
self.view = view
def __getattr__(self, name):
if self.view.id() != id:
output = OutputView.find_view()
|
if output:
self.view = output.view
return getattr(self.view, name)
def clear(self):
OutputView.content = ""
self.run_command("output_view_clear")
def append(self, text):
OutputView.content += text
self.run_command("output_view_append", { "text" : text })
def append_finish_message(self, command, working_dir, return_code, elapsed_time):
if return_code != 0:
templ = "[Finished in {:.2f}s with exit code {}]\n"
self.append(templ.format(elapsed_time, return_code))
self.append("[cmd: {}]\n".format(command))
self.append("[dir: {}]\n".format(working_dir))
else:
self.append("[Finished in {:.2f}s]\n".format(elapsed_time))
def _collapse(self, group):
window = sublime.active_window()
views = window.views_in_group(group)
if (len(views) == 0 or len(views) == 1 and
views[0].id() == self.view.id()):
collapse_group(group)
def _close(self):
window = sublime.active_window()
group, index = window.get_view_index(self.view)
window.run_command("close_by_index", {"group": group, "index": index})
self._collapse(group)
OutputView.id = None
@staticmethod
def close():
window = sublime.active_window()
for view in window.views():
if view.is_scratch() and view.name() == "Output":
OutputView(view)._close()
@staticmethod
def find_view():
window = sublime.active_window()
for view in window.views():
if view.is_scratch() and view.name() == "Output":
return OutputView(view)
return None
@staticmethod
def create():
view = OutputView.request()
view.clear()
return view
@staticmethod
def request():
window = sublime.active_window()
num_groups = window.num_groups()
if num_groups < 3:
layout = window.get_layout()
num_rows = len(layout["rows"]) - 1
num_cols = len(layout["cols"]) - 1
if len(layout["rows"]) < 3:
begin = layout["rows"][-2]
end = layout["rows"][-1]
layout["rows"] = layout["rows"][:-1] + [begin * 0.33 + end * 0.66, layout["rows"][-1]]
cells = []
new_num_rows = len(layout["rows"]) - 1
for cell in layout["cells"]:
if cell[3] == num_rows and cell[2] != num_cols:
cells.append([cell[0], cell[1], cell[2], new_num_rows])
else:
cells.append(cell)
cells.append([num_cols - 1, new_num_rows - 1, num_cols, new_num_rows])
layout["cells"] = cells
window.set_layout(layout)
num_groups = window.num_groups()
views = window.views_in_group(num_groups - 1)
output = None
for view in views:
if view.name() == "Output" and view.is_scratch():
output = view
if output == None:
active = window.active_view()
output = window.new_file()
output.settings().set("line_numbers", False)
output.settings().set("scroll_past_end", False)
output.settings().set("scroll_speed", 0.0)
output.settings().set("gutter", False)
output.settings().set("spell_check", False)
output.set_scratch(True)
output.set_name("Output")
output.run_command("output_view_append", { "text" : OutputView.content })
def update():
output.set_viewport_position((0, OutputView.position), False)
sublime.set_timeout(update, 0.0)
OutputView.id = output.id()
window.set_view_index(output, num_groups - 1, len(views))
window.focus_view(active)
return OutputView(output)
class OutputViewClearCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.view.erase(edit, sublime.Region(0, self.view.size()))
class OutputViewAppendCommand(sublime_plugin.TextCommand):
def run(self, edit, text):
scroll = self.view.visible_region().end() == self.view.size()
view = self.view
view.insert(edit, view.size(), text)
if scroll:
viewport = view.viewport_extent()
last_line = view.text_to_layout(view.size())
view.set_viewport_position((0, last_line[1] - viewport[1]), False)
class OpenOutputCommand(sublime_plugin.WindowCommand):
def run(self):
OutputView.request()
class CloseOutputCommand(sublime_plugin.ApplicationCommand):
def run(self):
OutputView.close()
class OutputEventListener(sublime_plugin.EventListener):
def on_query_context(self, view, key, operator, operand, match_all):
print(key)
if key == "output_visible":
return OutputView.find_view() != None
else:
return None
def on_close(self, view):
if view.is_scratch() and view.name() == "Output":
OutputView.position = view.viewport_position()[1]
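# --- Illustrative sketch, not part of the plugin -----------------------------
# clean_layout() at the top of this file drops rows and columns that no cell
# references and renumbers the remaining cells. A hedged worked example with a
# hypothetical three-row layout whose single cell only touches the outer rows:
def _clean_layout_example():
    layout = {
        "rows": [0.0, 0.5, 1.0],
        "cols": [0.0, 1.0],
        "cells": [[0, 0, 1, 2]],  # one cell spanning from row 0 to row 2
    }
    cleaned = clean_layout(layout)
    # cleaned == {"rows": [0.0, 1.0], "cols": [0.0, 1.0], "cells": [[0, 0, 1, 1]]}
    return cleaned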
|
elephanter/mongoengine
|
mongoengine/base/datastructures.py
|
Python
|
mit
| 15,294
| 0.001504
|
import weakref
import functools
import itertools
from mongoengine.common import _import_class
from mongoengine.errors import DoesNotExist, MultipleObjectsReturned
__all__ = ("BaseDict", "BaseList", "EmbeddedDocumentList")
class BaseDict(dict):
"""A special dict so we can watch any changes"""
_dereferenced = False
_instance = None
_name = None
def __init__(self, dict_items, instance, name):
Document = _import_class('Document')
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(instance, (Document, EmbeddedDocument)):
self._instance = weakref.proxy(instance)
self._name = name
return super(BaseDict, self).__init__(dict_items)
def __getitem__(self, key, *args, **kwargs):
value = super(BaseDict, self).__getitem__(key)
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(value, EmbeddedDocument) and value._instance is None:
value._instance = self._instance
elif not isinstance(value, BaseDict) and isinstance(value, dict):
value = BaseDict(value, None, '%s.%s' % (self._name, key))
super(BaseDict, self).__setitem__(key, value)
value._instance = self._instance
elif not isinstance(value, BaseList) and isinstance(value, list):
value = BaseList(value, None, '%s.%s' % (self._name, key))
super(BaseDict, self).__setitem__(key, value)
value._instance = self._instance
return value
def __setitem__(self, key, value, *args, **kwargs):
self._mark_as_changed(key)
return super(BaseDict, self).__setitem__(key, value)
def __delete__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__delete__(*args, **kwargs)
def __delitem__(self, key, *args, **kwargs):
self._mark_as_changed(key)
return super(BaseDict, self).__delitem__(key)
def __delattr__(self, key, *args, **kwargs):
self._mark_as_changed(key)
return super(BaseDict, self).__delattr__(key)
def __getstate__(self):
self.instance = None
self._dereferenced = False
return self
def __setstate__(self, state):
self = state
return self
def clear(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).clear(*args, **kwargs)
def pop(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).pop(*args, **kwargs)
def popitem(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).popitem(*args, **kwargs)
def setdefault(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).setdefault(*args, **kwargs)
def update(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).update(*args, **kwargs)
def _mark_as_changed(self, key=None):
if hasattr(self._instance, '_mark_as_changed'):
if key:
self._instance._mark_as_changed('%s.%s' % (self._name, key))
else:
self._instance._mark_as_changed(self._name)
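# --- Illustrative sketch, not part of mongoengine ----------------------------
# BaseDict/BaseList exist so field mutations can be reported back to the owning
# document as "<field>.<key>" change entries. The idea in a minimal standalone
# form (class and attribute names hypothetical):
class _TrackingDictSketch(dict):
    def __init__(self, items, owner, name):
        super(_TrackingDictSketch, self).__init__(items)
        self._owner = owner  # any object exposing _mark_as_changed(key)
        self._name = name    # field name used as the change-key prefix

    def __setitem__(self, key, value):
        self._owner._mark_as_changed('%s.%s' % (self._name, key))
        return super(_TrackingDictSketch, self).__setitem__(key, value)

    def __delitem__(self, key):
        self._owner._mark_as_changed('%s.%s' % (self._name, key))
        return super(_TrackingDictSketch, self).__delitem__(key)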
class BaseList(list):
"""A special list so we can watch any changes
"""
_dereferenced = False
_instance = None
_name = None
def __init__(self, list_items, instance, name):
Document = _import_class('Document')
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(instance, (Document, EmbeddedDocument)):
self._instance = weakref.proxy(instance)
self._name = name
super(BaseList, self).__init__(list_items)
def __getitem__(self, key, *args, **kwargs):
value = super(BaseList, self).__getitem__(key)
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(value, EmbeddedDocument) and value._instance is None:
value._instance = self._instance
elif not isinstance(value, BaseDict) and isinstance(value, dict):
value = BaseDict(value, None, '%s.%s' % (self._name, key))
super(BaseList, self).__setitem__(key, value)
value._instance = self._instance
elif not isinstance(value, BaseList) and isinstance(value, list):
value = BaseList(value, None, '%s.%s' % (self._name, key))
super(BaseList, self).__setitem__(key, value)
value._instance = self._instance
return value
def __setitem__(self, key, value, *args, **kwargs):
if isinstance(key, slice):
self._mark_as_changed()
else:
self._mark_as_changed(key)
return super(BaseList, self).__setitem__(key, value)
def __delitem__(self, key, *args, **kwargs):
if isinstance(key, slice):
self._mark_as_changed()
else:
self._mark_as_changed(key)
return super(BaseList, self).__delitem__(key)
def __setslice__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__setslice__(*args, **kwargs)
def __delslice__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__delslice__(*args, **kwargs)
def __getstate__(self):
self.instance = None
self._dereferenced = False
return self
def __setstate__(self, state):
self = state
return self
def append(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).append(*args, **kwargs)
def extend(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).extend(*args, **kwargs)
def insert(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).insert(*args, **kwargs)
def pop(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).pop(*args, **kwargs)
def remove(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).remove(*args, **kwargs)
def reverse(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).reverse(*args, **kwargs)
def sort(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).sort(*args, **kwargs)
def _mark_as_changed(self, key=None):
if hasattr(self._instance, '_mark_as_changed'):
if key:
self._instance._mark_as_changed('%s.%s' % (self._name, key))
else:
self._instance._mark_as_changed(self._name)
class EmbeddedDocumentList(BaseList):
@classmethod
def __match_all(cls, i, kwargs):
items = kwargs.items()
return all([
getattr(i, k) == v or str(getattr(i, k)) == v for k, v in items
])
@classmethod
def __only_matches(cls, obj, kwargs):
if not kwargs:
return obj
return filter(lambda i: cls.__match_all(i, kwargs), obj)
def __init__(self, list_items, instance, name):
super(EmbeddedDocumentList, self).__init__(list_items, instance, name)
self._instance = instance
def filter(self, **kwargs):
"""
Filters the list by only including embedded documents with the
given keyword arguments.
:param kwargs: The keyword arguments corresponding to the fields to
filter on. *Multiple arguments are treated as if they are ANDed
together.*
:return: A new ``EmbeddedDocumentList`` containing the matching
embedded documents.
Raises ``AttributeError`` if a given keyword is not a valid field for
the embedded document class.
"""
values = self.__only_matches(self, kwargs)
return EmbeddedDocumentList(values, self._instance, self._name)
    def exclude(self, **kwargs):
"""
Filters the list by excluding embedded documents with the given
keyword arguments.
:param kwargs: The keyword arguments corresponding to the fields to
            exclude on. *Multiple arguments are treated as if they are ANDed
            together.*
HybridF5/hybrid-jacket | nova_jacket/virt/jacket/vcloud/vcloud.py | Python | apache-2.0 | 51,929 | 0.003216 |
# VMware vCloud Python helper
# Copyright (c) 2014 Huawei, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from threading import Lock
from StringIO import StringIO
from oslo.utils import excutils
from oslo.vmware.common import loopingcall
from requests import exceptions as requests_excep
from nova.i18n import _, _LI, _LW, _LE
from nova.openstack.common import log as logging
from nova.virt.jacket.vcloud import exceptions
from pyvcloud.helper import CommonUtils
from pyvcloud.vapp import VAPP as sdk_vapp
from pyvcloud.vcloudair import VCA as sdk_vca
from pyvcloud.schema.vcd.v1_5.schemas.vcloud import vAppType, \
organizationListType, vdcType, catalogType, queryRecordViewType, \
networkType, vcloudType, taskType, vAppTemplateType, vmsType
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import IpScopeType,\
OrgVdcNetworkType, ReferenceType, NetworkConfigurationType, \
IpScopesType, IpRangesType, IpRangeType, DhcpPoolServiceType
LOG = logging.getLogger(__name__)
class VCLOUD_STATUS:
"""
status Attribute Values for VAppTemplate, VApp, Vm, and Media Objects
"""
FAILED_CREATION = -1
UNRESOLVED = 0
RESOLVED = 1
DEPLOYED = 2
SUSPENDED = 3
POWERED_ON = 4
WAITING_FOR_INPUT = 5
UNKNOWN = 6
UNRECOGNIZED = 7
POWERED_OFF = 8
INCONSISTENT_STATE = 9
MIXED = 10
DESCRIPTOR_PENDING = 11
COPYING_CONTENTS = 12
DISK_CONTENTS_PENDING = 13
QUARANTINED = 14
QUARANTINE_EXPIRED = 15
REJECTED = 16
TRANSFER_TIMEOUT = 17
VAPP_UNDEPLOYED = 18
VAPP_PARTIALLY_DEPLOYED = 19
def synchronized(method):
"""
A decorator object that used to synchronized method.
"""
def new_synchronized_method(self, *args, **kwargs):
if hasattr(self, "_auto_lock"):
with self._auto_lock:
return method(self, *args, **kwargs)
else:
raise AttributeError("Object is missing _auto_lock")
return new_synchronized_method
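# A minimal usage sketch (illustrative, not part of the original module): a
# class opts in by exposing an ``_auto_lock`` attribute (the Lock imported
# above) and decorating the methods that must not run concurrently
# (names below are hypothetical):
#   class TaskRunner(object):
#       def __init__(self):
#           self._auto_lock = Lock()
#       @synchronized
#       def run(self):
#           pass  # only one thread at a time executes this body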
class RetryDecorator(object):
# TODO(nkapotoxin) Use oslo_utils.excutils.py instead.
"""Decorator for retrying a function upon suggested exceptions.
The decorated function is retried for the given number of times, and the
sleep time between the retries is incremented until max sleep time is
reached. If the max retry count is set to -1, then the decorated function
is invoked indefinitely until an exception is thrown, and the caught
exception is not in the list of suggested exceptions.
"""
def __init__(self, max_retry_count=-1, inc_sleep_time=10,
max_sleep_time=10, exceptions=()):
"""Configure the retry object using the input params.
:param max_retry_count: maximum number of times the given function must
be retried when one of the input 'exceptions'
is caught. When set to -1, it will be retried
indefinitely until an exception is thrown
and the caught exception is not in param
exceptions.
:param inc_sleep_time: incremental time in seconds for sleep time
between retries
:param max_sleep_time: max sleep time in seconds beyond which the sleep
time will not be incremented using param
inc_sleep_time. On reaching this threshold,
max_sleep_time will be used as the sleep time.
:param exceptions: suggested exceptions for which the function must be
retried
"""
self._max_retry_count = max_retry_count
self._inc_sleep_time = inc_sleep_time
self._max_sleep_time = max_sleep_time
self._exceptions = exceptions
self._retry_count = 0
self._sleep_time = 0
def __call__(self, f):
def _func(*args, **kwargs):
func_name = f.__name__
result = None
try:
if self._retry_count:
LOG.debug("Invoking %(func_name)s; retry count is "
"%(retry_count)d.",
{'func_name': func_name,
'retry_count': self._retry_count})
result = f(*args, **kwargs)
except self._exceptions:
with excutils.save_and_reraise_exception() as ctxt:
LOG.warn(_LW("Exception which is in the suggested list of "
"exceptions occurred while invoking function:"
" %s."),
func_name,
exc_info=True)
if (self._max_retry_count != -1 and
self._retry_count >= self._max_retry_count):
LOG.error(_LE("Cannot retry upon suggested exception "
"since retry count (%(retry_count)d) "
"reached max retry count "
"(%(max_retry_count)d)."),
{'retry_count': self._retry_count,
'max_retry_count': self._max_retry_count})
else:
ctxt.reraise = False
self._retry_count += 1
self._sleep_time += self._inc_sleep_time
return self._sleep_time
raise loopingcall.LoopingCallDone(result)
def func(*args, **kwargs):
loop = loopingcall.DynamicLoopingCall(_func, *args, **kwargs)
evt = loop.start(periodic_interval_max=self._max_sleep_time)
LOG.debug("Waiting for function %s to return.", f.__name__)
return evt.wait()
return func
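# A minimal usage sketch (illustrative, not part of the original module): the
# decorator is configured with the retry policy and applied to the function
# that should be retried (the exception type below is only an example):
#   @RetryDecorator(max_retry_count=3, inc_sleep_time=5, max_sleep_time=30,
#                   exceptions=(requests_excep.ConnectionError,))
#   def query_vapp_status(vapp_name):
#       pass
# Each caught exception from the suggested list schedules another attempt via
# DynamicLoopingCall, sleeping 5, 10, 15, ... seconds (capped at 30) between tries.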
class NetworkConfig(object):
def __init__(self, network_name=None, fence_mode=None, href=None):
self._network_name = network_name
self._fence_mode = fence_mode
self._href = href
@property
def network_name(self):
|
return self._network_name
@property
def fence_mode(self):
return self._fence_mode
@property
def href(self):
return self._href
class NetworkConnection(object):
def __init__(self, network_name=None, ip_allocation_mode=None, ip_address=None, mac_address=None):
self._network_name = network_name
self._ip_allocation_mode = ip_allocation_mode
self._ip_address = ip_address
|
self._mac_address = mac_address
@property
def network_name(self):
return self._network_name
@property
def ip_allocation_mode(self):
return self._ip_allocation_mode
@property
def ip_address(self):
return self._ip_address
@property
def mac_address(self):
return self._mac_address
class VCA(sdk_vca):
"""
Packaged Vmware vcloud python sdk vca.
Vclouddriver just use func here.
"""
def __init__(self, host, username, service_type='ondemand',
version='5.5', verify=True):
super(VCA, self).__init__(
host,
username,
service_type=service_type,
version=version,
verify=verify
)
def create_isolated_vdc_network(self, vdc_name, network_name, gateway_name,
start_address, end_address, gateway_ip,
netmask, dns1=None, dns2=None,
d
|
zrluety/penguin | penguin/scripts/penguin_cli.py | Python | mit | 1,738 | 0.004028 |
import click
import os
|
import penguin.pdf as pdf
import penguin.utils as utils
def check_src(src):
if not all((map(utils.is_valid_source, src))):
raise click.BadParameter("src arguments must be either a valid directory"
" or pdf file.")
@click.group()
def penguin():
pass
@penguin.command()
@click.argument('src', nargs=-1)
@click.argument('dst')
@click.option('--bookmark', 'bookmark', flag_value='include-bookmarks',
default=True)
@click.option('--remove-blank-pages', 'rmblanks', flag_value='remove-blanks-pages',
default=False)
def combine(src, dst, bookmark, rmblanks):
"""Combine Pdf files from the source provided into the destination file.
:param src: The source Pdf file(s). src can either be a list of individual
files or directories containing Pdf files.
:param dst: The output file destination.
:param bookmark: True if the combined Pdf should include bookmarks.
:param rmblanks: True if blank pages should be removed from the combined Pdf.
"""
check_src(src)
combined_pdf = pdf.combine(src, bookmark, rmblanks)
with open(dst, 'wb') as f:
combined_pdf.write(f)
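# An illustrative invocation, assuming the click group is exposed as a
# ``penguin`` console script (paths below are hypothetical):
#   penguin combine reports/ appendix.pdf merged.pdf --remove-blank-pages
# combines every Pdf found under reports/ plus appendix.pdf into merged.pdf,
# keeping bookmarks (the default) and dropping blank pages.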
@penguin.command()
@click.argument('src',)
@click.argument('pages', nargs=-1)
@click.argument('dst')
def split(src, pages, dst):
"""Split the specified pages from src into the the dst.
:param src: The source Pdf file (directory).
:param pages: The page number(s) to extract from each file.
:param dst: The output file destination.
"""
check_src(src)
combined_pdf = pdf.split(src, pages)
with open(dst, 'wb') as f:
combined_pdf.write(f)
if __name__ == '__main__':
penguin()
|
AjabWorld/ajabsacco | ajabsacco/core/migrations/0002_auto_20150508_0542.py | Python | apache-2.0 | 1,088 | 0.000919 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='loanproduct',
name='accounting_rules',
),
migrations.RemoveField(
model_name='loanproduct',
name='extra_fields',
),
migrations.RemoveField(
            model_name='loanproduct',
name='fees',
),
migrations.RemoveField(
model_name='loanproduct',
name='meta',
),
migrations.RemoveField(
model_name='security',
name='meta',
),
migrations.AlterField(
model_name='member',
name='member_type',
field=models.IntegerField(default=4, choices=[(1, b'Staff'), (2, b'Customer'), (3, b'Corporate Customer'), (4, b'Customer'), (5, b'Commitee Member')]),
preserve_default=True,
),
]
|
andrewjton/SHOUT | app/shout/yell/urls.py | Python | mit | 435 | 0.013793 |
from django.conf.urls import url
from yell import views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^$', views.home, name='home'),
    url(r'^testing/$', views.testing, name='testing'),
    url(r'^results/$', views.restaurants, name='restaurants'),
url(r'^api/yelp_api/$', views.yelp_api, name="yelp_api"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
johnnoone/meuh-python | meuh/commands/distro.py | Python | mit | 2,649 | 0.000378 |
"""
meuh.commands.distro
~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import, print_function, unicode_literals
__all__ = ['InitCommand',
'DestroyAllCommand',
'DestroyCommand',
'ShowCommand']
import logging
from cliff.command import Command
from meuh.action import distro_dockerfile, distro_init, distributions, distro_destroy
from meuh.conf import settings
from meuh.exceptions import NotFound
class InitCommand(Command):
'create distribution'
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
        parser = super(InitCommand, self).get_parser(prog_name)
        parser.add_argument('distro')
parser.add_argument('--force', action='store_true')
return parser
def take_action(self, parsed_args):
data = distro_init(parsed_args.distro, parsed_args.force)
print('created %s %s' % (parsed_args.distro, data))
class ShowCommand(Command):
'show distribution'
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(ShowCommand, self).get_parser(prog_name)
parser.add_argument('distro')
return parser
def take_action(self, parsed_args):
data = distro_dockerfile(parsed_args.distro)
print(data)
class ListCommand(Command):
'list distributions'
log = logging.getLogger(__name__)
def take_action(self, parsed_args):
for name, status in distributions().items():
print(name, status)
class DestroyCommand(Command):
'destroy a single distro'
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(DestroyCommand, self).get_parser(prog_name)
parser.add_argument('distro')
parser.add_argument('--force', action='store_true')
return parser
def take_action(self, parsed_args):
distro_destroy(parsed_args.distro, parsed_args.force)
        self.log.info('%s has been destroyed' % parsed_args.distro)
class DestroyAllCommand(Command):
'destroy all distros'
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(DestroyAllCommand, self).get_parser(prog_name)
parser.add_argument('--force', action='store_true')
return parser
def take_action(self, parsed_args):
for name in settings.distros.keys():
try:
distro_destroy(name, parsed_args.force)
self.log.info('%s has been destroyed' % name)
except NotFound:
pass
except Exception as e:
self.log.error(e)
|
lm-tools/situational | situational/apps/sectors/forms.py | Python | bsd-3-clause | 2,817 | 0 |
from django import forms
from django.forms.forms import BoundField
from .helpers import LMIForAllClient
from .fields import MultiCharField
class FieldSet(object):
"""
Taken from stackoverflow.com/questions/10366745/django-form-field-grouping
Helper class to group BoundField objects together.
"""
def __init__(self, form, fields, legend='', cls=None):
self.form = form
self.legend = legend
self.fields = fields
self.cls = cls
def __iter__(self):
for name in self.fields:
field = self.form.fields[name]
yield BoundField(self.form, field, name)
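# A minimal usage sketch (illustrative, not part of the original module): a
# form can expose grouped fields to its template (field names hypothetical):
#   role_fields = FieldSet(form, ['job_title', 'location'], legend='Role')
#   for bound_field in role_fields:
#       pass  # render each BoundField under the 'Role' legend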
class NoColonForm(forms.Form):
"""
Removes the default colons from form labels.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('label_suffix', '')
super().__init__(*args, **kwargs)
class BaseLMIForm(NoColonForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lmi_client = LMIForAllClient()
class SectorForm(NoColonForm):
SECTOR_INPUT_COUNT = 3
sector = MultiCharField(
count=SECTOR_INPUT_COUNT,
label="How would you describe the types of jobs you could do?",
help_text=" eg customer services, security, data entry, driver",
require_all_fields=False,
error_messages={'required': 'Enter at least one job role', },
)
class JobDescriptionsForm(BaseLMIForm):
def __init__(self, *args, **kwargs):
keywords = kwargs['keywords']
del kwargs['keywords']
super().__init__(*args, **kwargs)
self.fieldsets = []
self._add_fields_from_keywords(keywords)
    def _add_fields_from_keywords(self, keywords):
for keyword in keywords:
if keyword:
soc_codes = []
lmi_data = self.lmi_client.keyword_search(keyword)
count = 6
for item in lmi_data[:count]:
soc_code = str(item['soc'])
if soc_code not in soc_codes:
soc_codes.append(soc_code)
                    field = forms.BooleanField(
widget=forms.CheckboxInput,
label=item['title'],
help_text=item['description'],
required=False,
)
self.fields[soc_code] = field
self.fieldsets.append(FieldSet(
self, list(soc_codes), keyword))
def clean(self):
cleaned_data = super().clean()
if not any(cleaned_data.values()):
raise forms.ValidationError(
"Please select at least one job title",
code='invalid'
)
return cleaned_data
|
scode/pants | contrib/spindle/src/python/pants/contrib/spindle/tasks/spindle_gen.py | Python | apache-2.0 | 10,402 | 0.007402 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from collections import defaultdict
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.address import Address
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.source_root import SourceRoot
from pants.option.custom_types import list_option
from twitter.common.dirutil import safe_mkdir
from pants.contrib.spindle.targets.spindle_thrift_library import SpindleThriftLibrary
class SpindleGen(NailgunTask):
@classmethod
def product_types(cls):
return [
'scala',
]
@classmethod
def register_options(cls, register):
super(SpindleGen, cls).register_options(register)
register(
'--jvm-options',
default=[],
advanced=True,
type=list_option,
help='Use these jvm options when running Spindle.',
)
register(
'--runtime-dependency',
default=['3rdparty:spindle-runtime'],
advanced=True,
type=list_option,
      help='A list of targets that all spindle codegen depends on at runtime.',
)
cls.register_jvm_tool(register,
'spindle-codegen',
classpath=[
JarDependency(org='com.foursquare',
name='spindle-codegen-binary_2.10',
rev='3.0.0-M7'),
])
@classmethod
def prepare(cls, options, round_manager):
    super(SpindleGen, cls).prepare(options, round_manager)
round_manager.require_data('jvm_build_tools_classpath_callbacks')
@property
def spindle_classpath(self):
return self.tool_classpath('spindle-codegen')
@property
def synthetic_target_extra_dependencies(self):
return set(
dep_target
for dep_spec in self.get_options().runtime_dependency
for dep_target in self.context.resolve(dep_spec)
)
@property
def namespace_out(self):
return os.path.join(self.workdir, 'scala_record')
def codegen_targets(self):
return self.context.targets(lambda t: isinstance(t, SpindleThriftLibrary))
def sources_generated_by_target(self, target):
return [
os.path.join(self.namespace_out, relative_genned_source)
for thrift_source in target.sources_relative_to_buildroot()
for relative_genned_source in calculate_genfiles(thrift_source)
]
def execute_codegen(self, targets):
sources = self._calculate_sources(targets, lambda t: isinstance(t, SpindleThriftLibrary))
bases = set(
target.target_base
for target in self.context.targets(lambda t: isinstance(t, SpindleThriftLibrary))
)
scalate_workdir = os.path.join(self.workdir, 'scalate_workdir')
safe_mkdir(self.namespace_out)
safe_mkdir(scalate_workdir)
args = [
'--template', 'scala/record.ssp',
'--java_template', 'javagen/record.ssp',
'--thrift_include', ':'.join(bases),
'--namespace_out', self.namespace_out,
'--working_dir', scalate_workdir,
]
args.extend(sources)
result = self.runjava(classpath=self.spindle_classpath,
main='com.foursquare.spindle.codegen.binary.ThriftCodegen',
jvm_options=self.get_options().jvm_options,
args=args,
workunit_name='generate')
if result != 0:
raise TaskError('{} returned {}'.format(self.main_class, result))
def execute(self):
targets = self.codegen_targets()
build_graph = self.context.build_graph
with self.invalidated(targets, invalidate_dependents=True) as invalidation_check:
for vts in invalidation_check.invalid_vts_partitioned:
invalid_targets = vts.targets
self.execute_codegen(invalid_targets)
invalid_vts_by_target = dict([(vt.target, vt) for vt in invalidation_check.invalid_vts])
vts_artifactfiles_pairs = defaultdict(list)
for target in targets:
java_synthetic_name = '{0}-{1}'.format(target.id, 'java')
java_sources_rel_path = os.path.relpath(self.namespace_out, get_buildroot())
java_synthetic_address = Address(java_sources_rel_path, java_synthetic_name)
java_generated_sources = [
os.path.join(os.path.dirname(source), 'java_{0}.java'.format(os.path.basename(source)))
for source in self.sources_generated_by_target(target)
]
java_relative_generated_sources = [os.path.relpath(src, self.namespace_out)
for src in java_generated_sources]
# We can't use context.add_new_target because it now does fancy management
# of synthetic target / target root interaction that breaks us here.
java_target_base = os.path.join(get_buildroot(), java_synthetic_address.spec_path)
if not os.path.exists(java_target_base):
os.makedirs(java_target_base)
SourceRoot.register(java_synthetic_address.spec_path)
build_graph.inject_synthetic_target(
address=java_synthetic_address,
target_type=JavaLibrary,
dependencies=[dep.address for dep in self.synthetic_target_extra_dependencies],
derived_from=target,
sources=java_relative_generated_sources,
)
java_synthetic_target = build_graph.get_target(java_synthetic_address)
# NOTE(pl): This bypasses the convenience function (Target.inject_dependency) in order
# to improve performance. Note that we can walk the transitive dependee subgraph once
# for transitive invalidation rather than walking a smaller subgraph for every single
# dependency injected. This walk is done below, after the scala synthetic target is
# injected.
for concrete_dependency_address in build_graph.dependencies_of(target.address):
build_graph.inject_dependency(
dependent=java_synthetic_target.address,
dependency=concrete_dependency_address,
)
if target in invalid_vts_by_target:
vts_artifactfiles_pairs[invalid_vts_by_target[target]].extend(java_generated_sources)
synthetic_name = '{0}-{1}'.format(target.id, 'scala')
sources_rel_path = os.path.relpath(self.namespace_out, get_buildroot())
synthetic_address = Address(sources_rel_path, synthetic_name)
generated_sources = [
'{0}.{1}'.format(source, 'scala')
for source in self.sources_generated_by_target(target)
]
relative_generated_sources = [os.path.relpath(src, self.namespace_out)
for src in generated_sources]
synthetic_target = self.context.add_new_target(
address=synthetic_address,
target_type=ScalaLibrary,
dependencies=self.synthetic_target_extra_dependencies,
sources=relative_generated_sources,
derived_from=target,
java_sources=[java_synthetic_target.address.spec],
)
# NOTE(pl): This bypasses the convenience function (Target.inject_dependency) in order
# to improve performance. Note that we can walk the transitive dependee subgraph once
# for transitive invalidation rather than walking a smaller subgraph for every single
# dependency injected. This walk also covers the invalidation for the java synthetic
# target above.
for dependent_address in build_graph.dependents_of(target.address):
build_graph.inject_dependency(dependent=dependent_address,
dependency=synthetic_target.address)
# NOTE(pl): See the above comment. The same note
|
thomashuang/Lilac | lilac/controller/__init__.py | Python | lgpl-3.0 | 136 | 0.007353 |
#!/usr/bin/env python
import logging
LOGGER = logging.getLogger('controller')
|
USER = 'user'
ROOT = 'root'
ADMIN = 'administrator'
|
|
arventwei/django_test | mysite/mysite/view.py | Python | mit | 589 | 0.010187 |
from django.http import HttpResponse, Http404
import datetime
def hello(request):
return HttpResponse("Hello world")
def home(request):
datetime.datetime.now()
now = datetime.datetime.now()
html = "<html><body>It is now %s.</body></html>" % now
return HttpResponse(html)
def hours_ahead(request, offset):
try:
offset = int(offset)
except ValueError:
raise Http404()
dt = datetime.datetime.now() + datetime.timedelta(hours=offset)
html = "<html><body>In %s hour(s), it will be %s.</body></html>" % (offset, dt)
return HttpResponse(html)
|
Prokuma/cafe-order-system | register.py | Python | mit | 598 | 0.006356 |
#-*- coding: utf-8 -*-
import hashlib
import getpass
from pymongo import MongoClient
id = raw_input("등록할 아이디를 입력하세요: ")
pw = getpass.getpass("등록할 비밀번호를 입력하세요(입력하는 것은 보이지 않습니다): ")
client = MongoClient('localhost', 27017)
db = client.RnBCafe
member_collection = db.member
|
if member_collection.find_one({'id': id}) is None:
member_collection.insert({'id': id, 'password': hashlib.sha512(pw).hexdigest()})
print "성공적으로 등록이 완료되었습니다!"
else:
print "이미 있는 아이디입니다."
|
ANU-Linked-Earth-Data/middleware | batch-demo/import_agdc_data.py | Python | apache-2.0 | 12,109 | 0.000165 |
#!/usr/bin/env python3
"""Loads an HDF5 file representing Landsat data into a triple store via
SPARQL."""
from argparse import ArgumentParser
from base64 import b64encode
from io import StringIO, BytesIO
from itertools import islice, chain
from dateutil.parser import parse as date_parse
import h5py
import numpy as np
from scipy.misc import toimage
from screw_rdflib import (ConjunctiveGraph, Literal, Namespace, OWL, RDF, RDFS,
XSD, URIRef, BNode, Graph)
# RDF namespaces
GEO = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
LED = Namespace('http://www.example.org/ANU-LED#')
QB = Namespace('http://purl.org/linked-data/cube#')
SDMXC = Namespace('http://purl.org/linked-data/sdmx/2009/concept#')
SDMXD = Namespace('http://purl.org/linked-data/sdmx/2009/dimension#')
SDMXM = Namespace('http://purl.org/linked-data/sdmx/2009/measure#')
OGC = Namespace('http://www.opengis.net/ont/geosparql#')
# Default graph to update (doesn't really matter because default graph on query
# is union of all named graphs)
DEFAULT = LED.lsGraph
# Boilerplate turtles are the best turtles
BOILERPLATE_TURTLE = """
@prefix : <{LED}> .
@prefix rdf: <{RDF}> .
@prefix rdfs: <{RDFS}> .
@prefix xsd: <{XSD}> .
@prefix qb: <{QB}> .
@prefix sdmx-concept: <{SDMXC}> .
@prefix sdmx-dimension: <{SDMXD}> .
@prefix sdmx-measure: <{SDMXM}> .
@prefix geo: <{GEO}> .
@prefix owl: <{OWL}> .
@prefix ogc: <{OGC}> .
@prefix gcmd-platform: <http://geobrain.laits.gmu.edu/ontology/2004/11/gcmd-platform.owl#> .
@prefix gcmd-instrument: <http://geobrain.laits.gmu.edu/ontology/2004/11/gcmd-instrument.owl#> .
:landsatDSD a qb:DataStructureDefinition ;
qb:component :instrumentComponent
, :positionComponent
, :satelliteComponent
, :timeComponent
, :dataComponent
, :etmBandComponent
, :dggsComponent
, :dggsCellComponent
, :dggsLevelSquareComponent
, :dggsLevelPixelComponent .
:landsatDS a qb:DataSet ;
rdfs:label "Landsat
|
sensor data"@en ;
rdfs:comment "Some data from LandSat, retrieved from AGDC"@en ;
qb:structure :landsatDSD ;
:instrument gcmd-instrument:SCANNER ;
:satellite gcmd-platform:LANDSAT-7 ;
:dggs "rHEALPix WGS84 Ellipsoid" .
:instrumentComponent a qb:ComponentSpecification ;
qb:attribute :instrument .
:positionComponent a qb:ComponentSpecification ;
qb:dimension :location .
:satelliteComponent a qb:ComponentSpecification ;
qb:attribute :satellite .
:timeComponent a qb:ComponentSpecification ;
qb:dimension :time .
:dataComponent a qb:ComponentSpecification ;
qb:measure :imageData .
:etmBandComponent a qb:ComponentSpecification ;
qb:dimension :etmBand .
:dggsComponent a qb:ComponentSpecification ;
qb:attribute :dggs .
:dggsCellComponent a qb:ComponentSpecification ;
qb:dimension :dggsCell .
:dggsLevelSquareComponent a qb:ComponentSpecification ;
qb:dimension :dggsLevelSquare .
:dggsLevelPixelComponent a qb:ComponentSpecification ;
qb:dimension :dggsLevelPixel .
:etmBand a qb:AttributeProperty ;
rdfs:label "LandSat ETM observation band"@en;
rdfs:range xsd:integer .
:instrument a qb:AttributeProperty ;
rdfs:range gcmd-instrument:Instrument .
:satellite a qb:AttributeProperty ;
rdfs:range gcmd-platform:PLATFORM .
:time a qb:AttributeProperty ;
rdfs:range xsd:dateTime .
:dggs a qb:AttributeProperty ;
rdfs:range xsd:string .
:dggsCell a owl:DatatypeProperty, qb:DimensionProperty ;
rdfs:range xsd:string .
:dggsLevelSquare a qb:DimensionProperty ;
rdfs:range xsd:integer .
:dggsLevelPixel a qb:DimensionProperty ;
rdfs:range xsd:integer .
""".format(QB=QB, SDMXD=SDMXD, SDMXM=SDMXM, LED=LED, GEO=GEO, SDMXC=SDMXC,
RDF=RDF, RDFS=RDFS, XSD=XSD, OWL=OWL, OGC=OGC)
def slow(generator, suffix, interval=500, total=None):
"""Used to annotate slow generators. Will print progress every ``interval``
yields."""
tot_str = '/' + str(total) if total is not None else ''
for idx, val in enumerate(generator):
if idx % interval == 0:
print('{}{} {}'.format(idx, tot_str, suffix))
yield val
def array_to_png(array):
"""Turn a 2D array into a data: URI filled with PNG goodies :)"""
assert array.ndim == 2
# Convert to PIL image with transparent pixels for masked values
im = toimage(array)
mask = array.mask
if mask.shape:
# Only bother putting in an alpha channel if there are masked values
alpha = toimage(~mask)
im.putalpha(alpha)
else:
assert not mask, 'Need to have some unmasked values'
# Now save to base64-encoded data: URL
fp = BytesIO()
im.save(fp, format='png')
data = b64encode(fp.getvalue()).decode('utf-8')
return 'data:image/png;base64,' + data
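# A minimal usage sketch (illustrative, not part of the original script):
#   tile = np.ma.masked_less(np.random.rand(9, 9), 0.2)
#   uri = array_to_png(tile)  # 'data:image/png;base64,...'
# Masked cells end up as transparent pixels in the embedded PNG.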
def loc_triples(subj, prop, lat, lon):
"""Yield a bunch of triples indicating that something is at a given
latitude and longitude. This is actually really painful because of
blank nodes :("""
loc_bnode = BNode()
yield (subj, prop, loc_bnode)
yield (loc_bnode, GEO.lat, Literal(lat, datatype=XSD.decimal))
yield (loc_bnode, GEO.lon, Literal(lon, datatype=XSD.decimal))
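# A minimal sketch of the output (illustrative): for a subject S,
#   list(loc_triples(S, LED.location, -35.28, 149.13))
# yields three triples: (S, LED.location, _:b), (_:b, geo:lat, "-35.28") and
# (_:b, geo:lon, "149.13"), where _:b is a freshly minted blank node.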
def cell_level_square(cell_id):
"""Get level in DGGS hierarchy associated with slash-separated cell ID.
Maps `/R/0/0/0/0/5` to 5, for instance"""
return len([x for x in cell_id.split('/') if x])
def ident_for_tile(cell_id, level_square, level_pixel, band, meta):
dt = meta['datetime']
url_end = 'observation'
utc = dt.utctimetuple()
url_end += '/{utc.tm_year}/{utc.tm_mon}/{utc.tm_mday}/{utc.tm_hour}' \
'/{utc.tm_min}/{utc.tm_sec}'.format(utc=utc)
url_end += '/cell/' + cell_id.strip('/')
url_end += '/levelSquare-%i' % level_square
url_end += '/levelPixel-%i' % level_pixel
url_end += '/band-%i' % band
return LED[url_end]
def graph_for_data(cell_id, tile, band, meta):
is_pixel = tile.ndim <= 1
if is_pixel:
tile_size = 1
else:
tile_w, tile_h = tile.shape
assert tile_w == tile_h
tile_size = tile_w
# Find level in DGGS hierarchy of current square and current data
level_square = cell_level_square(cell_id)
if is_pixel:
level_pixel = level_square
else:
extra = np.log(tile_size) / np.log(3)
int_extra = int(round(extra))
assert abs(extra - int_extra) < 1e-5, \
'Tile size needs to be power of 3'
level_pixel = level_square + int_extra
ident = ident_for_tile(cell_id, level_square, level_pixel, band, meta)
# Bounding box for the tile, which we'll convert into WKT
bbox_corners = meta['bounds']
loc_wkt = Literal('POLYGON(({0}, {1}, {2}, {3}, {0}))'.format(
*['{} {}'.format(lon, lat) for lon, lat in bbox_corners]
), datatype=OGC.wktLiteral)
# Woooo this resolution calculation makes no sense
maxes = bbox_corners.max(axis=0)
mins = bbox_corners.min(axis=0)
res = np.mean(tile_size / np.abs(maxes - mins))
if is_pixel:
yield from [
(ident, LED.value, Literal(float(tile))),
(ident, RDF.type, LED.Pixel)
]
else:
png_tile = URIRef(array_to_png(tile))
yield from [
(ident, LED.imageData, png_tile),
(ident, RDF.type, LED.GridSquare)
]
# Actual data
yield from [
(ident, RDF.type, QB.Observation),
(ident, QB.dataSet, LED.landsatDS),
(ident, LED.bounds, loc_wkt),
(ident, LED.etmBand, Literal(band, datatype=XSD.integer)),
(ident, LED.time, Literal(meta['datetime'], datatype=XSD.datetime)),
(ident, LED.resolution,
Literal(res, datatype=XSD.decimal)),
(ident, LED.dggsCell, Literal(cell_id)),
(ident, LED.dggsLevelSquare, Literal(level_square)),
(ident, LED.dggsLevelPixel, Literal(level_pixel))
]
# Yield the centre point
centre_lon, centre_lat = meta['centre']
yield from loc_triples(ident, LED.location, centre_lat, centre_lon)
def convert_meta(src_meta):
"""Convert
|
pombredanne/ompc | ompclib/gplot/PlotItems.py | Python | bsd-3-clause | 24,085 | 0.001412 |
# $Id: PlotItems.py,v 2.13 2003/08/18 22:33:00 mhagger Exp $
# Copyright (C) 1998-2003 Michael Haggerty <mhagger@alum.mit.edu>
#
# This file is licensed under the GNU Lesser General Public License
# (LGPL). See LICENSE.txt for details.
"""PlotItems.py -- Objects that can be plotted by Gnuplot.
This module contains several types of PlotItems. PlotItems can be
plotted by passing them to a Gnuplot.Gnuplot object. You can derive
your own classes from the PlotItem hierarchy to customize their
behavior.
"""
__cvs_version__ = '$Revision: 2.13 $'
import os, string, tempfile, types
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import Numeric
import gp, utils, Errors
class _unset:
"""Used to represent unset keyword arguments."""
pass
class PlotItem:
"""Plotitem represents an item that can be plotted by gnuplot.
For the finest control over the output, you can create 'PlotItems'
yourself with additional keyword options, or derive new classes
from 'PlotItem'.
The handling of options is complicated by the attempt to allow
options and their setting mechanism to be inherited conveniently.
Note first that there are some options that can only be set in the
constructor then never modified, and others that can be set in the
constructor and/or modified using the 'set_option()' member
function. The former are always processed within '__init__'. The
latter are always processed within 'set_option', which is called
by the constructor.
'set_option' is driven by a class-wide dictionary called
'_option_list', which is a mapping '{ <option> : <setter> }' from
option name to the function object used to set or change the
option. <setter> is a function object that takes two parameters:
'self' (the 'PlotItem' instance) and the new value requested for
the option. If <setter> is 'None', then the option is not allowed
to be changed after construction and an exception is raised.
Any 'PlotItem' that needs to add options can add to this
dictionary within its class definition. Follow one of the
examples in this file. Alternatively it could override the
'set_option' member function if it needs to do wilder things.
Members:
      '_basecommand' -- a string holding the elementary argument that
must be passed to gnuplot's `plot' command for this item;
e.g., 'sin(x)' or '"filename.dat"'.
      '_options' -- a dictionary of (<option>,<string>) tuples
corresponding to the plot options that have been set for
this instance of the PlotItem. <option> is the option as
specified by the user; <string> is the string that needs to
be set in the command line to set that option (or None if no
string is needed). Example::
{'title' : ('Data', 'title "Data"'),
'with' : ('linespoints', 'with linespoints')}
"""
# For _option_list explanation, see docstring for PlotItem.
_option_list = {
'axes' : lambda self, axes: self.set_string_option(
'axes', axes, None, 'axes %s'),
        'with' : lambda self, with_: self.set_string_option(
            'with', with_, None, 'with %s'),
'title' : lambda self, title: self.set_string_option(
'title', title, 'notitle', 'title "%s"'),
}
# order in which options need to be passed to gnuplot:
_option_sequence = [
'binary',
'index', 'every', 'thru', 'using', 'smooth',
'axes', 'title', 'with'
]
def __init__(self, **keyw):
"""Construct a 'PlotItem'.
Keyword options:
'with=<string>' -- choose how item will be plotted, e.g.,
with='points 3 3'.
'title=<string>' -- set the title to be associated with the item
in the plot legend.
'title=None' -- choose 'notitle' option (omit item from legend).
Note that omitting the title option is different than setting
'title=None'; the former chooses gnuplot's default whereas the
latter chooses 'notitle'.
"""
self._options = {}
apply(self.set_option, (), keyw)
def get_option(self, name):
"""Return the setting of an option. May be overridden."""
try:
return self._options[name][0]
except:
raise KeyError('option %s is not set!' % name)
def set_option(self, **keyw):
"""Set or change a plot option for this PlotItem.
See documentation for '__init__' for information about allowed
options. This function can be overridden by derived classes
to allow additional options, in which case those options will
also be allowed by '__init__' for the derived class. However,
it is easier to define a new '_option_list' variable for the
derived class.
"""
for (option, value) in keyw.items():
try:
setter = self._option_list[option]
except KeyError:
raise Errors.OptionError('%s=%s' % (option,value))
if setter is None:
raise Errors.OptionError(
'Cannot modify %s option after construction!', option)
else:
setter(self, value)
def set_string_option(self, option, value, default, fmt):
"""Set an option that takes a string value."""
if value is None:
self._options[option] = (value, default)
elif type(value) is types.StringType:
self._options[option] = (value, fmt % value)
else:
            raise Errors.OptionError('%s=%s' % (option, value,))
def clear_option(self, name):
"""Clear (unset) a plot option. No error if option was not set."""
try:
del self._options[name]
except KeyError:
pass
def get_base_command_string(self):
raise NotImplementedError()
def get_command_option_string(self):
cmd = []
for opt in self._option_sequence:
(val,str) = self._options.get(opt, (None,None))
if str is not None:
cmd.append(str)
return string.join(cmd)
def command(self):
"""Build the plot command to be sent to gnuplot.
Build and return the plot command, with options, necessary to
display this item. If anything else needs to be done once per
plot, it can be done here too.
"""
return string.join([
self.get_base_command_string(),
self.get_command_option_string(),
])
def pipein(self, f):
"""Pipe necessary inline data to gnuplot.
If the plot command requires data to be put on stdin (i.e.,
'plot "-"'), this method should put that data there. Can be
overridden in derived classes.
"""
pass
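# A minimal subclassing sketch (illustrative, not part of the original module):
# a derived item can accept an extra option just by extending _option_list,
# e.g. reusing the 'smooth' option already listed in _option_sequence:
#   class MyFileItem(PlotItem):
#       _option_list = PlotItem._option_list.copy()
#       _option_list.update({
#           'smooth': lambda self, value: self.set_string_option(
#               'smooth', value, None, 'smooth %s'),
#       })
# MyFileItem(smooth='csplines') would then store the option and contribute
# "smooth csplines" to the generated plot command.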
class Func(PlotItem):
"""Represents a mathematical expression to plot.
Func represents a mathematical expression that is to be computed by
gnuplot itself, as if you would type for example::
gnuplot> plot sin(x)
into gnuplot itself. The argument to the contructor is a string
that should be a mathematical expression. Example::
g.plot(Func('sin(x)', with='line 3'))
As shorthand, a string passed to the plot method of a Gnuplot
object is also treated as a Func::
g.plot('sin(x)')
"""
def __init__(self, function, **keyw):
apply(PlotItem.__init__, (self,), keyw)
self.function = function
def get_base_command_string(self):
return self.function
class _FileItem(PlotItem):
"""A PlotItem representing a file that contains gnuplot data.
This class is not meant for users but rather as a base class for
other types of FileItem.
"""
_option_list = PlotItem._option_list.copy()
_option_list.update({
'binary' : lambda self, binary: self.set_option_binary(binary),
'index' : lambda self, value: self.set_option_colonsep('index', value),
'every' : lambda self, value: s
|
VA3SFA/rpi_hw_demo | hc-sr04/distance.py | Python | gpl-2.0 | 82 | 0.012195 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Syed Faisal Akber
#
|
pvtodorov/indra | indra/tests/test_lincs_drug.py | Python | bsd-2-clause | 980 | 0 |
from __future__ import absolute_import, print_function, unicode_literals
import unittest
from nose.plugins.attrib import attr
from indra.databases.lincs_client import get_drug_target_data
from indra.sources.lincs_drug import process_from_web
@attr('webservice')
@unittest.skip('LINCS web service very unreliable.')
def test_process_from_web():
lincs_p = process_from_web()
assert lincs_p is not None
assert lincs_p.statements
data_len = len(get_drug_target_data())
num_stmts = len(lincs_p.statements)
# Note that due to an erroneous entry in the HMS LINCS protein table,
# one Statement is not extracted from the table, hence the condition
# below, which should be kept as long as the error persists
assert num_stmts >= data_len - 1, \
("Did not convert all sta
|
tements: expected %d, got %d."
% (data_len, num_stmts))
assert all(len(s.evidence) > 0 for s in lincs_p.statements),\
"Some statements lack evidence."
|
SmartPeople/zulip | zerver/webhooks/crashlytics/view.py | Python | apache-2.0 | 1,910 | 0.002618 |
# Webhooks for external integrations.
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import Client, UserProfile
from django.http import HttpRequest, HttpResponse
from typing import Any, Dict, Text
CRASHLYTICS_SUBJECT_TEMPLATE = '{display_id}: {title}'
CRASHLYTICS_MESSAGE_TEMPLATE = '[Issue]({url}) impacts at least {impacted_devices_count} device(s).'
CRASHLYTICS_SETUP_SUBJECT_TEMPLATE = "Setup"
CRASHLYTICS_SETUP_MESSAGE_TEMPLATE = "Webhook has been successfully configured."
VERIFICATION_EVENT = 'verification'
@api_key_only_webhook_view('Crashlytics')
@has_request_variables
def api_crashlytics_webhook(request, user_profile, client, payload=REQ(argument_type='body'),
stream=REQ(default='crashlytics')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], Text) -> HttpResponse
try:
event = payload['event']
if event == VERIFICATION_EVENT:
subject = CRASHLYTICS_SETUP_SUBJECT_TEMPLATE
body = CRASHLYTICS_SETUP_MESSAGE_TEMPLATE
else:
issue_body = payload['payload']
            subject = CRASHLYTICS_SUBJECT_TEMPLATE.format(
display_id=issue_body['display_id'],
title=issue_body['title']
)
body = CRASHLYTICS_MESSAGE_TEMPLATE.format(
impacted_devices_count=issue_body['impacted_devices_count'],
url=issue_body['url']
)
except KeyError as e:
return json_error(_("Missing key {} in JSON".format(str(e))))
check_send_message(user_profile, client, 'stream', [stream],
subject, body)
return json_success()
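# An illustrative (hypothetical) non-verification payload, showing only the
# keys the handler above reads:
#   {"event": "issue_impact_change",
#    "payload": {"display_id": 123, "title": "Fatal Exception",
#                "impacted_devices_count": 16,
#                "url": "http://crashlytics.com/full/url/to/issue"}}
# would yield the subject "123: Fatal Exception" and a message linking the
# issue with its impacted device count.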
|
kburts/django-playlist | django_playlist/django_playlist/wsgi.py | Python | mit | 1,578 | 0.001267 |
"""
WSGI config for django_playlist project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings
|
"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_playlist.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
azurefang/flask-zheye | app/main/verbs.py | Python | lgpl-3.0 | 163 | 0.006135 |
from feedly.verbs import register
from feedly.verbs.base import Verb
class Pin(Verb):
id = 5
infinitive = 'pin'
past_tense = 'pinned'
register(Pin)
|
jiadaizhao/LeetCode | 1601-1700/1637-Widest Vertical Area Between Two Points Containing No Points/1637-Widest Vertical Area Between Two Points Containing No Points.py | Python | mit | 193 | 0 |
from typing import List
class Solution:
    def maxWidthOfVerticalArea(self, points: List[List[int]]) -> int:
        xs = sorted(x for x, y in points)
return max(xs[i] - xs[i - 1] for i in range(1, len(xs)))
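# Worked example (illustrative, not part of the original submission):
#   points = [[8, 7], [9, 9], [7, 4], [9, 7]]
#   sorted xs -> [7, 8, 9, 9]; consecutive gaps -> 1, 1, 0; widest area -> 1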
|
seasonfif/python | learning/classmodule/Parent.py | Python | apache-2.0 | 1,829 | 0.03897 |
# coding=utf-8
class Parent(object):
__parentAttr = 100
_parentAttr = 100
parentAttr = 100
def __init__(self):
print "父类构造函数"
def parentMethod(self):
print "父类方法"
    def _protectedMethod(self):
print "我是protected方法"
def __privateMethod(self):
print "我是private方法"
def overWriteMethod(self):
print "父类方法重写"
class Father(object):
__parentAttr = 200
_parentAttr = 200
parentAttr = 200
    def __init__(self):
print "Father类构造函数"
def parentMethod(self):
print "Father类方法"
def _protectedMethod(self):
print "Father protected方法"
def __privateMethod(self):
print "Father private方法"
def overWriteMethod(self):
print "Father类方法重写"
class Child (Father,Parent):
childAttr = "100"
def __init__(self):
        # Old way of calling the parent constructor:
        # Parent.__init__(self)
        # New way (the parent class must inherit from object):
super(Child, self).__init__()
print "子类构造函数"
def childMethod(self):
print "子类方法"
def overWriteMethod(self):
        # Call each parent class separately:
        # Parent.overWriteMethod(self)
        # super() looks up overWriteMethod breadth-first across the parent classes; it does not call every parent's method
super(Child, self).overWriteMethod()
print "子类方法重写"
c = Child()
c.parentMethod()
c.childMethod()
c._protectedMethod()
# Calling the private method on the subclass would raise an error at runtime
# c.__privateMethod()
# Access the private method through name mangling instead
c._Parent__privateMethod()
print "访问父类公开变量:" + bytes(c.parentAttr)
print "访问父类保护变量:" + bytes(c._parentAttr)
# Accessing the parent's private attribute through the subclass would raise an error at runtime
# print "访问父类私有变量:" + bytes(c.__parentAttr)
# Access the private attribute through name mangling instead
print "访问父类私有变量:" + bytes(c._Parent__parentAttr)
c.overWriteMethod()
|
beblount/Steer-Clear-Backend-Web | steerclear/__init__.py | Python | mit | 723 | 0.006916 |
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
# initialize flask app with correct configurations
app = Flask(__name__)
app.config.from_object('steerclear.settings.windows_settings')
app.config.from_envvar('STEERCLEAR_SETTINGS')
db = SQLAlchemy(app)
from flask.ext.login import LoginManager
login_manager = LoginManager()
login_manager.init_app(app)
from steerclear.api.views import api_bp
from steerclear.driver_portal.views import driver_portal_bp
from steerclear.login.views import login_bp
# register all blueprints to the app
app.register_blueprint(api_bp)
app.register_blueprint(driver_portal_bp)
app.register_blueprint(login_bp)
# :TODO: generate actual secret key
app.secret_key = 'secret'
|
ctrlaltdel/neutrinator | vendor/openstack/tests/unit/block_storage/v2/test_type.py | Python | gpl-3.0 | 1,577 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.block_storage.v2 import type
FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff"
TYPE = {
"extra_specs": {
"capabilities": "gpu"
},
"id": FAKE_ID,
"name": "SSD"
}
class TestType(base.TestCase):
def test_basic(self):
sot = type.Type(**TYPE)
self.assertEqual("volume_type", sot.resource_key)
self.assertEqual("volume_types", sot.resources_key)
self.assertEqual("/types", sot.
|
base_path)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
self.assertFalse(sot.allow_commit)
def test_new(self):
        sot = type.Type.new(id=FAKE_ID)
self.assertEqual(FAKE_ID, sot.id)
def test_create(self):
sot = type.Type(**TYPE)
self.assertEqual(TYPE["id"], sot.id)
self.assertEqual(TYPE["extra_specs"], sot.extra_specs)
self.assertEqual(TYPE["name"], sot.name)
|
sonofeft/ODSCharts | docs/sphinxy.py | Python | gpl-3.0 | 4,251 | 0.008469 |
#!/usr/bin/env python
# sphinxy: commandline continuous integration sphinx documentation
#
# Copyright (C) 2015 Charlie Taylor <ctatsourceforge@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
sphinxy - Script to monitor changes to *.rst files and rerun sphinx-build
Since sphinx-build checks for "out-of-date" rst files,
first "touch" all *.rst files to put them "out-of-date".
Then run "sphinx-build" command
sphinxy tries to launch a web browser with the index.html file from local disk.
This is not guaranteed to work on all systems.
After each rebuild, hit F5 (screen refresh) to see the new HTML doc files.
This is heavily adapted from nosy by Mike Steder at:
https://gist.github.com/steder/1220683
"""
__version__ = 1.0
import os
import subprocess
import stat
import sys
import time
import glob
import webbrowser
from keyboard_hit import KBHit
kb = KBHit()
TEST_RUNNER = "sphinx-build -b html -d _build/doctrees . _build/html"
PATTERNS = ["*.rst",]
TARGET_DIR = os.curdir
INDEX_PAGE = os.path.join( TARGET_DIR, '_build', 'html', 'index.html' )
webbrowser.open(INDEX_PAGE)
def print_instructions():
print('')
print('='*55)
print(' Hit F5 (Refresh) in Browser')
print('='*55)
print(' hit ESC or <ctrl>C to exit')
print(' hit "b" to launch webbrowser')
print(' hit any other key to rebuild HTML')
def checksum_directory(directory, touch_first=False):
"""
Walk directory structure and return simple checksum based on
file size and modified time.
"""
file_checksums = []
fileL = glob.glob( os.path.join(directory,'*.rst') )
for source_path in fileL:
if touch_first: #
os.utime(source_path, None)
try:
stats = os.stat(source_path)
except OSError:
# ignore temp files and files we don't
# have perms to access
continue
file_checksums.append(
stats[stat.ST_SIZE] + stats[stat.ST_MTIME])
return sum(file_checksums)
def main():
args = " ".join(sys.argv[1:])
command = "%s %s"%(TEST_RUNNER, args)
latest_checksum = checksum_directory(TARGET_DIR)
print( "Sphinxy starting with: %s"%(command) )
print( command )
subprocess.call(command.split())
print_instructions()
try:
while (True):
checksum = checksum_directory(TARGET_DIR, touch_first=False)
if checksum != latest_checksum:
print( "Sphinxy detected a change and is rerunning tests with: %s"%(command) )
                latest_checksum = checksum_directory(TARGET_DIR, touch_first=True)
|
subprocess.call(command.split())
print_instructions()
time.sleep(1)
if kb.kbhit():
c = kb.getch()
if ord(c) == 27: # ESC
sys.exit()
elif ord(c) == ord('b'): # launch browser
webbrowser.open(INDEX_PAGE)
else:
latest_checksum = -1
except KeyboardInterrupt:
print( "Exiting Sphinxy..." )
if __name__=="__main__":
main()
|
sivaprakashniet/push_pull | p2p/lib/python2.7/site-packages/celery/backends/base.py | Python | bsd-3-clause | 19,323 | 0.000052 |
# -*- coding: utf-8 -*-
"""
celery.backends.base
~~~~~~~~~~~~~~~~~~~~
Result backend base classes.
- :class:`BaseBackend` defines the interface.
- :class:`KeyValueStoreBackend` is a common base class
using K/V semantics like _get and _put.
"""
from __future__ import absolute_import
import time
import sys
from datetime import timedelta
from billiard.einfo import ExceptionInfo
from kombu.serialization import (
dumps, loads, prepare_accept_content,
registry as serializer_registry,
)
from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8
from celery import states
from celery.app import current_task
from celery.exceptions import ChordError, TimeoutError, TaskRevokedError
from celery.five import items
from celery.result import result_from_tuple, GroupResult
from celery.utils import timeutils
from celery.utils.functional import LRUCache
from celery.utils.serialization import (
get_pickled_exception,
get_pickleable_exception,
create_exception_cls,
)
__all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend']
EXCEPTION_ABLE_CODECS = frozenset(['pickle', 'yaml'])
PY3 = sys.version_info >= (3, 0)
def unpickle_backend(cls, args, kwargs):
"""Return an unpickled backend."""
from celery import current_app
return cls(*args, app=current_app._get_current_object(), **kwargs)
class BaseBackend(object):
READY_STATES = states.READY_STATES
UNREADY_STATES = states.UNREADY_STATES
EXCEPTION_STATES = states.EXCEPTION_STATES
TimeoutError = TimeoutError
#: Time to sleep between polling each individual item
#: in `ResultSet.iterate`. as opposed to the `interval`
#: argument which is for each pass.
subpolling_interval = None
#: If true the backend must implement :meth:`get_many`.
supports_native_join = False
#: If true the backend must automatically expire results.
#: The daily backend_cleanup periodic task will not be triggered
#: in this case.
supports_autoexpire = False
    #: Set to true if the backend is persistent by default.
persistent = True
def __init__(self, app, serializer=None,
max_cached_results=None, accept=None, **kwargs):
self.app = app
conf = self.app.conf
self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
(self.content_type,
self.content_encoding,
self.encoder) = serializer_registry._encoders[self.serializer]
self._cache = LRUCache(
limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS,
)
self.accept = prepare_accept_content(
conf.CELERY_ACCEPT_CONTENT if accept is None else accept,
)
def mark_as_started(self, task_id, **meta):
"""Mark a task as started"""
return self.store_result(task_id, meta, status=states.STARTED)
def mark_as_done(self, task_id, result, request=None):
"""Mark task as successfully executed."""
return self.store_result(task_id, result,
status=states.SUCCESS, request=request)
def mark_as_failure(self, task_id, exc, traceback=None, request=None):
"""Mark task as executed with failure. Stores the execption."""
return self.store_result(task_id, exc, status=states.FAILURE,
traceback=traceback, request=request)
def fail_from_current_stack(self, task_id, exc=None):
type_, real_exc, tb = sys.exc_info()
try:
exc = real_exc if exc is None else exc
ei = ExceptionInfo((type_, exc, tb))
self.mark_as_failure(task_id, exc, ei.traceback)
return ei
finally:
del(tb)
def mark_as_retry(self, task_id, exc, traceback=None, request=None):
"""Mark task as being retries. Stores the current
exception (if any)."""
return self.store_result(task_id, exc, status=states.RETRY,
traceback=traceback, request=request)
def mark_as_revoked(self, task_id, reason='', request=None):
return self.store_result(task_id, TaskRevokedError(reason),
status=states.REVOKED, traceback=None,
request=request)
def prepare_exception(self, exc):
"""Prepare exception for serialization."""
if self.serializer in EXCEPTION_ABLE_CODECS:
return get_pickleable_exception(exc)
return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}
def exception_to_python(self, exc):
"""Convert serialized exception to Python exception."""
if self.serializer in EXCEPTION_ABLE_CODECS:
return get_pickled_exception(exc)
return create_exception_cls(
from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
def prepare_value(self, result):
"""Prepare value for storage."""
if isinstance(result, GroupResult):
return result.as_tuple()
return result
def encode(self, data):
_, _, payload = dumps(data, serializer=self.serializer)
return payload
def decode(self, payload):
        payload = PY3 and payload or str(payload)
return loads(payload,
content_type=self.content_type,
content_encoding=self.content_encoding,
accept=self.accept)
def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
"""Wait for task and return its result.
        If the task raises an exception, this exception
will be re-raised by :func:`wait_for`.
If `timeout` is not :const:`None`, this raises the
:class:`celery.exceptions.TimeoutError` exception if the operation
takes longer than `timeout` seconds.
"""
time_elapsed = 0.0
while 1:
status = self.get_status(task_id)
if status == states.SUCCESS:
return self.get_result(task_id)
elif status in states.PROPAGATE_STATES:
result = self.get_result(task_id)
if propagate:
raise result
return result
# avoid hammering the CPU checking status.
time.sleep(interval)
time_elapsed += interval
if timeout and time_elapsed >= timeout:
raise TimeoutError('The operation timed out.')
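    # A minimal usage sketch (illustrative, not part of the original module):
    #   result = backend.wait_for(task_id, timeout=10, interval=0.5)
    # polls every half second and returns the stored result once the task
    # reaches SUCCESS, re-raises the stored exception for propagating states,
    # or raises TimeoutError after ten seconds.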
def prepare_expires(self, value, type=None):
if value is None:
value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
if isinstance(value, timedelta):
value = timeutils.timedelta_seconds(value)
if value is not None and type:
return type(value)
return value
def prepare_persistent(self, enabled=None):
if enabled is not None:
return enabled
p = self.app.conf.CELERY_RESULT_PERSISTENT
return self.persistent if p is None else p
def encode_result(self, result, status):
if status in self.EXCEPTION_STATES and isinstance(result, Exception):
return self.prepare_exception(result)
else:
return self.prepare_value(result)
def is_cached(self, task_id):
return task_id in self._cache
def store_result(self, task_id, result, status,
traceback=None, request=None, **kwargs):
"""Update task state and result."""
result = self.encode_result(result, status)
self._store_result(task_id, result, status, traceback,
request=request, **kwargs)
return result
def forget(self, task_id):
self._cache.pop(task_id, None)
self._forget(task_id)
def _forget(self, task_id):
raise NotImplementedError('backend does not implement forget.')
def get_status(self, task_id):
"""Get the status of a task."""
return self.get_task_meta(task_id)['status']
def get_traceback(self, task_id):
"""Get the traceback for a failed task."""
return self.get_task_meta(task_id).get('traceback')
def get_result(self, task
|
robhudson/kuma | kuma/wiki/constants.py | Python | mpl-2.0 | 12,819 | 0.000546 |
import re
import bleach
from tower import ugettext_lazy as _lazy
ALLOWED_TAGS = bleach.ALLOWED_TAGS + [
'div', 'span', 'p', 'br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'pre', 'code', 'cite',
'dl', 'dt', 'dd', 'small', 'sub', 'sup', 'u', 'strike', 'samp', 'abbr',
'ul', 'ol', 'li',
'nobr', 'dfn', 'caption', 'var', 's',
'i', 'img', 'hr',
'input', 'label', 'select', 'option', 'textarea',
# Note: <iframe> is allowed, but src="" is pre-filtered before bleach
'iframe',
'table', 'tbody', 'thead', 'tfoot', 'tr', 'th', 'td', 'colgroup', 'col',
'section', 'header', 'footer', 'nav', 'article', 'aside', 'figure',
'figcaption',
'dialog', 'hgroup', 'mark', 'time', 'meter', 'command', 'output',
'progress', 'audio', 'video', 'details', 'summary', 'datagrid', 'datalist',
'table', 'address', 'font',
'bdi', 'bdo', 'del', 'ins', 'kbd', 'samp', 'var',
'ruby', 'rp', 'rt', 'q',
# MathML
'math', 'maction', 'menclose', 'merror', 'mfenced', 'mfrac', 'mglyph',
'mi', 'mlabeledtr', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mroot', 'mrow', 'ms', 'mspace', 'msqrt', 'mstyle',
'msub', 'msup', 'msubsup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'mprescripts', 'semantics', 'annotation',
'annotation-xml',
]
ALLOWED_ATTRIBUTES = bleach.ALLOWED_ATTRIBUTES
ALLOWED_ATTRIBUTES['*'] = ['lang']
# Note: <iframe> is allowed, but src="" is pre-filtered before bleach
ALLOWED_ATTRIBUTES['iframe'] = ['id', 'src', 'sandbox', 'seamless',
'frameborder', 'width', 'height', 'class']
ALLOWED_ATTRIBUTES['p'] = ['style', 'class', 'id', 'align', 'lang', 'dir']
ALLOWED_ATTRIBUTES['span'] = ['style', 'class', 'id', 'title', 'lang', 'dir']
ALLOWED_ATTRIBUTES['abbr'] = ['style', 'class', 'id', 'title', 'lang', 'dir']
ALLOWED_ATTRIBUTES['img'] = ['src', 'id', 'align', 'alt', 'class', 'is',
'title', 'style', 'lang', 'dir', 'width',
'height']
ALLOWED_ATTRIBUTES['a'] = ['style', 'id', 'class', 'href', 'title',
'lang', 'name', 'dir', 'hreflang', 'rel']
ALLOWED_ATTRIBUTES['i'] = ['class']
ALLOWED_ATTRIBUTES['td'] = ['style', 'id', 'class', 'colspan', 'rowspan',
'lang', 'dir']
ALLOWED_ATTRIBUTES['th'] = ['style', 'id', 'class', 'colspan', 'rowspan',
'scope', 'lang', 'dir']
ALLOWED_ATTRIBUTES['video'] = ['style', 'id', 'class', 'lang', 'src',
'controls', 'dir']
ALLOWED_ATTRIBUTES['font'] = ['color', 'face', 'size', 'dir']
ALLOWED_ATTRIBUTES['details'] = ['open']
ALLOWED_ATTRIBUTES['select'] = ['name', 'dir']
ALLOWED_ATTRIBUTES['option'] = ['value', 'selected', 'dir']
ALLOWED_ATTRIBUTES['ol'] = ['style', 'class', 'id', 'lang', 'start', 'dir']
ALLOWED_ATTRIBUTES.update(dict((x, ['style', 'class', 'id', 'name', 'lang',
'dir'])
for x in
('h1', 'h2', 'h3', 'h4', 'h5', 'h6')))
ALLOWED_ATTRIBUTES.update(dict((x, ['style', 'class', 'id', 'lang', 'dir', 'title'])
for x in (
'div', 'pre', 'ul', 'li', 'code', 'dl', 'dt', 'dd',
'section', 'header', 'footer', 'nav', 'article', 'aside', 'figure',
'dialog', 'hgroup', 'mark', 'time', 'meter', 'command', 'output',
'progress', 'audio', 'details', 'datagrid', 'datalist', 'table',
'tr', 'address', 'col', 's', 'strong'
)))
ALLOWED_ATTRIBUTES.update(dict((x, ['cite']) for x in (
'blockquote', 'del', 'ins', 'q'
)))
ALLOWED_ATTRIBUTES['li'] += ['data-default-state']
ALLOWED_ATTRIBUTES['time'] += ['datetime']
ALLOWED_ATTRIBUTES['ins'] = ['datetime']
ALLOWED_ATTRIBUTES['del'] = ['datetime']
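# --- illustrative usage sketch (not part of the original constants module) ---
# These whitelists are meant to be handed to bleach.clean().  A hedged sketch,
# assuming a bleach release whose clean() accepts tags=/attributes= keyword
# arguments (the html and cleaned values below are made-up examples):
#
#     cleaned = bleach.clean('<p class="x" onclick="evil()">hi<script>1</script></p>',
#                            tags=ALLOWED_TAGS,
#                            attributes=ALLOWED_ATTRIBUTES)
#     # <script> is escaped and onclick dropped; class survives because
#     # ALLOWED_ATTRIBUTES['p'] lists it.
# ------------------------------------------------------------------------------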
# MathML
ALLOWED_ATTRIBUTES.update(dict((x, ['encoding', 'src']) for x in (
'annotation', 'annotation-xml')))
ALLOWED_ATTRIBUTES.update(
dict((x,
['href', 'mathbackground', 'mathcolor',
'id', 'class', 'style']) for x in ('math', 'maction', 'menclose',
'merror', 'mfenced', 'mfrac', 'mglyph',
'mi', 'mlabeledtr', 'mmultiscripts',
'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mroot', 'mrow', 'ms',
'mspace', 'msqrt', 'mstyle',
'msub', 'msup', 'msubsup', 'mtable',
'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'mprescripts')))
ALLOWED_ATTRIBUTES['math'] += [
'display', 'dir', 'selection', 'notation',
'close', 'open', 'separators', 'bevelled', 'denomalign', 'linethickness',
'numalign', 'largeop', 'maxsize', 'minsize', 'movablelimits', 'rspace',
'separator', 'stretchy', 'symmetric', 'depth', 'lquote', 'rquote', 'align',
'columnlines', 'frame', 'rowalign', 'rowspacing', 'rowspan', 'columnspan',
'accent', 'accentunder', 'dir', 'mathsize', 'mathvariant',
'subscriptshift', 'supscriptshift', 'scriptlevel', 'displaystyle',
'scriptsizemultiplier', 'scriptminsize', 'altimg', 'altimg-width',
'altimg-height', 'altimg-valign', 'alttext']
ALLOWED_ATTRIBUTES['maction'] += ['actiontype', 'selection']
ALLOWED_ATTRIBUTES['menclose'] += ['notation']
ALLOWED_ATTRIBUTES['mfenced'] += ['close', 'open', 'separators']
ALLOWED_ATTRIBUTES['mfrac'] += ['bevelled', 'denomalign', 'linethickness',
'numalign']
ALLOWED_ATTRIBUTES['mi'] += ['dir', 'mathsize', 'mathvariant']
ALLOWED_ATTRIBUTES['mi'] += ['mathsize', 'mathvariant']
ALLOWED_ATTRIBUTES['mmultiscripts'] += ['subscriptshift', 'superscriptshift']
ALLOWED_ATTRIBUTES['mo'] += ['largeop', 'lspace', 'maxsize', 'minsize',
'movablelimits', 'rspace', 'separator',
'stretchy', 'symmetric', 'accent',
'dir', 'mathsize', 'mathvariant']
ALLOWED_ATTRIBUTES['mover'] += ['accent']
ALLOWED_ATTRIBUTES['mpadded'] += ['lspace', 'voffset', 'depth']
ALLOWED_ATTRIBUTES['mrow'] += ['dir']
ALLOWED_ATTRIBUTES['ms'] += ['lquote', 'rquote', 'dir', 'mathsize',
'mathvariant']
ALLOWED_ATTRIBUTES['mspace'] += ['depth', 'height', 'width']
ALLOWED_ATTRIBUTES['mstyle'] += [
'display', 'dir', 'selection', 'notation',
'close', 'open', 'separators', 'bevelled', 'denomalign', 'linethickness',
'numalign', 'largeop', 'maxsize', 'minsize', 'movablelimits', 'rspace',
'separator', 'stretchy', 'symmetric', 'depth', 'lquote', 'rquote', 'align',
'columnlines', 'frame', 'rowalign', 'rowspacing', 'rowspan', 'columnspan',
'accent', 'accentunder', 'dir', 'mathsize', 'mathvariant',
'subscriptshift', 'supscriptshift', 'scriptlevel', 'displaystyle',
'scriptsizemultiplier',
'scriptminsize']
ALLOWED_ATTRIBUTES['msub'] += ['subscriptshift']
ALLOWED_ATTRIBUTES['msubsup'] += ['subscriptshift', 'superscriptshift']
ALLOWED_ATTRIBUTES['msup'] += ['superscriptshift']
ALLOWED_ATTRIBUTES['mtable'] += ['align', 'columnalign', 'columnlines',
'frame', 'rowalign', 'rowspacing', 'rowlines']
ALLOWED_ATTRIBUTES['mtd'] += ['columnalign', 'columnspan', 'rowalign',
'rowspan']
ALLOWED_ATTRIBUTES['mtext'] += ['dir', 'mathsize', 'mathvariant']
ALLOWED_ATTRIBUTES['mtr'] += ['columnalign', 'rowalign']
ALLOWED_ATTRIBUTES['munder'] += ['accentunder']
ALLOWED_ATTRIBUTES['munderover'] = ['accent', 'accentunder']
# CSS
ALLOWED_STYLES = [
'border', 'border-top', 'border-right', 'border-bottom', 'border-left',
'float', 'overflow', 'min-height', 'vertical-align',
'white-space', 'color', 'border-radius', '-webkit-border-radius',
'-moz-border-radius', '-o-border-radius',
'margin', 'margin-left', 'margin-top', 'margin-bottom', 'margin-right',
'padding', 'padding-left', 'padding-top', 'padding-bottom',
'padding-right', 'position', 'top', 'height', 'left', 'right',
'backg
|
xuru/pyvisdk
|
pyvisdk/do/host_license_connect_info.py
|
Python
|
mit
| 1,053
| 0.009497
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostLicenseConnectInfo(vim, *args, **kwargs):
'''This data object type describes license information stored on the host.'''
obj = vim.client.factory.create('ns0:HostLicenseConnectInfo')
# do some validation checking...
if (len(args) + len(kwargs)) < 2:
raise IndexError('Expected at least 2 arguments got: %d' % len(args))
required = [ 'evaluation', 'license' ]
optional = [ 'resource', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
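# --- illustrative usage sketch (not part of the generated module) ------------
# The factory accepts the required properties ('evaluation', 'license')
# positionally or as keywords; unknown names raise InvalidArgumentError and
# fewer than two values raise IndexError.  Hypothetical call, assuming `vim`
# is an already-connected pyvisdk service instance and the values exist:
#
#     info = HostLicenseConnectInfo(vim, evaluation=eval_info, license=lic_info)
# ------------------------------------------------------------------------------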
|
FEniCS/dolfin
|
doc/generate_api_rst.py
|
Python
|
lgpl-3.0
| 8,783
| 0.001708
|
#!/usr/bin/env python
#
# Read doxygen xml files to find all members of the dolfin
# name space and generate API doc files per subdirectory of
# dolfin
#
# Written by Tormod Landet, 2017
#
from __future__ import print_function
import sys, os
import parse_doxygen
DOXYGEN_XML_DIR = 'doxygen/xml'
API_GEN_DIR = 'generated_rst_files'
SWIG_DIR = '../dolfin/swig/'
SWIG_FILE = 'docstrings.i'
MOCK_PY = 'mock_cpp_modules.py'
def get_subdir(hpp_file_name):
"""
Return "subdir" for a path name like
/path/to/dolfin/subdir/a_header.h
"""
path_components = hpp_file_name.split(os.sep)
path_components_rev = path_components[::-1]
idx = path_components_rev.index('dolfin')
subdir = path_components_rev[idx - 1]
return subdir
def get_short_path(hpp_file_name):
"""
Return "dolfin/subdir/a_header.h" for a path name like
/path/to/dolfin/subdir/a_header.h
"""
path_components = hpp_file_name.split(os.sep)
if 'dolfin' in path_components:
# dolfin header files
path_components_rev = path_components[::-1]
idx = path_components_rev.index('dolfin')
short_path = path_components_rev[:idx + 1]
else:
# ufc header files
short_path = path_components[-1:]
return os.sep.join(short_path[::-1])
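# --- illustrative self-check (not part of the original script) ---------------
# get_subdir() pulls out the component directly below "dolfin", while
# get_short_path() keeps everything from "dolfin" onwards (or just the file
# name for ufc headers).  A small example using the helpers defined above:
_example_hpp = os.sep.join(['', 'src', 'dolfin', 'mesh', 'Mesh.h'])
assert get_subdir(_example_hpp) == 'mesh'
assert get_short_path(_example_hpp) == os.sep.join(['dolfin', 'mesh', 'Mesh.h'])
# ------------------------------------------------------------------------------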
def write_rst(subdir, subdir_members, api_gen_dir):
"""
Write files for Sphinx C++ API documentation
"""
rst_name = os.path.join(api_gen_dir, 'api_gen_%s.rst' % subdir)
print('Generating', rst_name)
# Make output directory
if not os.path.isdir(api_gen_dir):
os.mkdir(api_gen_dir)
prev_short_name = ''
with open(rst_name, 'wt') as rst:
rst.write('.. automatically generated by generate_api_rst.py and parse_doxygen.py\n')
#rst.write('dolfin/%s\n%s' % (subdir, '=' * 80))
#rst.write('\nDocumentation for C++ code found in dolfin/%s/*.h\n\n' % subdir)
rst.write('\n.. contents::\n\n\n')
kinds = [('typedef', 'Type definitions', 'doxygentypedef'),
('enum', 'Enumerations', 'doxygenenum'),
('function', 'Functions', 'doxygenfunction'),
('struct', 'Structures', 'doxygenstruct'),
('variable', 'Variables', 'doxygenvariable'),
('class', 'Classes', 'doxygenclass')]
for kind, kind_name, directive in kinds:
if kind in subdir_members:
# Write header H2
rst.write('%s\n%s\n\n' % (kind_name, '-'*70))
for name, member in sorted(subdir_members[kind].items()):
short_name = member.short_name
fn = get_short_path(member.hpp_file_name)
# Write header H3
if short_name != prev_short_name:
rst.write('%s\n%s\n\n' % (short_name, '~'*60))
prev_short_name = short_name
# Info about filename
rst.write('C++ documentation for ``%s`` from ``%s``:\n\n' % (short_name, fn))
# Write documentation for this item
rst.write(member.to_rst())
rst.write('\n\n')
def write_swig(subdir, subdir_members, swig_dir, swig_file_name, swig_header=''):
"""
Write files for SWIG so that we get docstrings in Python
"""
swig_subdir = os.path.join(swig_dir, subdir)
if not os.path.isdir(swig_subdir):
os.mkdir(swig_subdir)
swig_iface_name = os.path.join(swig_subdir, swig_file_name)
print('Generating', swig_iface_name)
with open(swig_iface_name, 'wt') as out:
out.write(swig_header)
out.write('// SWIG docstrings generated by doxygen and generate_api_rst.py / parse_doxygen.py\n\n')
for kind in subdir_members:
for name, member in sorted(subdir_members[kind].items()):
out.write(member.to_swig())
out.write('\n')
def write_mock_modules(namespace_members, mock_py_module):
"""
Write a mock module so that we can create documentation for
dolfin on ReadTheDocs, where we cannot compile and the
dolfin.cpp.* modules are not available. We fake those, but
include the correct docstrings
"""
print('Generating', mock_py_module)
mydir = os.path.dirname(os.path.abspath(__file__))
swig_module_dir = os.path.join(mydir, '..', 'dolfin', 'swig', 'modules')
swig_module_dir = os.path.abspath(swig_module_dir)
if not os.path.isdir(swig_module_dir):
print('SWIG module directory is not present,', swig_module_dir)
print('No mock Python code will be generated')
return
with open(mock_py_module, 'wt') as out:
out.write('#!/usr/bin/env python\n')
out.write('#\n')
out.write('# This file is AUTO GENERATED!\n')
out.write('# This file is fake, full of mock stubs\n')
out.write('# This file is made by generate_api_rst.py\n')
out.write('#\n\n')
out.write('from __future__ import print_function\n')
out.write('from types import ModuleType\n')
out.write('import sys\n')
out.write('\n\nWARNING = "This is a mock object!"\n')
# Loop over SWIG modules and generate mock Python modules
for module_name in os.listdir(swig_module_dir):
module_i = os.path.join(swig_module_dir, module_name, 'module.i')
if not os.path.isfile(module_i):
continue
# Find out which headers are included in this SWIG module
included_headers = set()
for line in open(module_i):
if line.startswith('#include'):
header = line[8:].strip()[1:-1]
included_headers.add(header)
elif line.startswith('%import'):
header = line.split(')')[1].strip()[1:-1]
included_headers.add(header)
module_py_name = '_' + module_name
full_module_py_name = 'dolfin.cpp.' + module_py_name
out.write('\n\n' + '#'*80 + '\n')
out.write('%s = ModuleType("%s")\n' % (module_py_name, full_module_py_name))
out.write('sys.modules["%s"] = %s\n' % (full_module_py_name, module_py_name))
out.write('\n')
print(' Generating module', full_module_py_name)
for member in namespace_members:
# Check if this member is included in the given SWIG module
hpp_file_name = get_short_path(member.hpp_file_name)
if hpp_file_name not in included_headers:
continue
out.write(member.to_mock(modulename=module_py_name))
out.write('\n\n')
def parse_doxygen_xml_and_generate_rst_and_swig(xml_dir, api_gen_dir, swig_dir, swig_file_name,
swig_header='', mock_py_module=''):
# Read doxygen XML files and split namespace members into
# groups based on subdir and kind (class, function, enum etc)
create_subdir_groups_if_missing = False
if os.path.isdir(xml_dir):
namespaces = parse_doxygen.read_doxygen_xml_files(xml_dir, ['dolfin', 'ufc'])
else:
raise OSError('Missing doxygen XML directory %r' % xml_dir)
# Group all documented members into subdir groups (io, la, mesh, fem etc)
sorted_members = list(namespaces['dolfin'].members.values())
sorted_members.sort(key=lambda m: m.name)
all_members = {}
for member in sorted_members:
subdir = get_subdir(member.hpp_file_name)
sd = all_members.setdefault(subdir, {})
kd = sd.setdefault(member.kind, {})
kd[member.name] = member
# Generate Sphinx RST files and SWIG interface files
for subdir, subdir_members in sorted(all_members.items()):
if subdir:
if api_gen_dir:
write_rst(subdir, subdir_members, api_gen_dir)
if swig_dir:
write_swig(subdir, subdir_members, swig_dir, swig_file_name, swig_header)
# Write UFC documentation; no SWIG for UFC, only RST
if api_gen_dir:
ufc_members = {}
for member
|
axltxl/zenfig
|
zenfig/log.py
|
Python
|
mit
| 1,553
| 0.001288
|
# -*- coding: utf-8 -*-
"""
zenfig.log
~~~~~~~~~~~~~
Nice output
:copyright: (c) 2016 by Alejandro Ricoveri
:license: MIT, see LICENSE for more details.
"""
import sys
from clint.textui.colored import white, red, cyan, yellow, green
from clint.textui import puts
# Globals
_stdout = False
def init(*, quiet_stdout=True):
"""
Initiate the log module
:param quiet_stdout: when False, informational and debug messages are also emitted
"""
# create stout handler
if not quiet_stdout:
global _stdout
_stdout = True
def to_stdout(msg, *, colorf=green, bold=False, quiet=True):
if not quiet or _stdout:
print(colorf(msg, bold=bold), file=sys.stderr)
def msg(message, *, bold=False):
"""
Log a regular message
:param message: the message to be logged
"""
to_stdout(" --- {message}".format(message=message), bold=bold)
def msg_warn(message):
"""
Log a warning message
:param message: the message to be logged
"""
to_stdout(" (!) {message}".format(message=message),
colorf=yellow, bold=True, quiet=False)
def msg_err(message):
"""
Log an error message
:param message: the message to be logged
"""
to_stdout(" !!! {message}".format(message=message),
colorf=red, bold=True, quiet=False)
def msg_debug(message):
"""
Log a debug message
:param message: the message to be logged
"""
to_stdout(" (*) {message}".format(message=message), colorf=cyan)
|
bath-hacker/binny
|
binny/db/models.py
|
Python
|
mit
| 1,478
| 0.004736
|
from django.db import models
from django.contrib.auth.models import User
class IntegerRangeField(models.IntegerField):
def __init__(self, verbose_name=None, name=None, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
models.IntegerField.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {'min_value': self.min_value, 'max_value':self.max_value}
defaults.update(kwargs)
return super(IntegerRangeField, self).formfield(**defaults)
# Create your models here.
class Bin(models.Model):
description = models.CharField(max_length=300, null=True)
long = models.DecimalField(decimal_places=7, max_digits=10)
lat = models.DecimalField(decimal_places=7, max_digits=10)
access = models.CharField(max_length=300, null=True)
image = models.URLField(null=True)
asset = models.CharField(null=True, max_length=300)
def __unicode__(self):
return 'ID:{0} {1}'.format(self.pk, self.description)
class Found(models.Model):
user = models.ForeignKey(User)
bin = models.ForeignKey(Bin)
date_added = models.DateField(auto_now_add=True)
difficulty = IntegerRangeField(min_value=1, max_value=5)
overflowing = models.BooleanField(default=False)
notes = models.CharField(max_length=140)
def __str__(self):
return '{0} found {1} on {2}'.format(self.user.username, self.bin.asset, self.date_added)
|
timoMa/vigra
|
vigranumpy/examples/boundary_gui/bv_feature_selection.py
|
Python
|
mit
| 3,993
| 0.013023
|
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import numpy#
from pyqtgraph.parametertree import Parameter, ParameterTree, ParameterItem, registerParameterType
class FeatureSelectionDialog(QtGui.QDialog):
def __init__(self,viewer, parent):
super(FeatureSelectionDialog, self).__init__(parent)
self.resize(800,600)
self.viewer = viewer
self.layout = QtGui.QVBoxLayout()
self.setLayout(self.layout)
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok|QtGui.QDialogButtonBox.Cancel)
self.buttonBox.accepted.connect(self.onPressAccepted)
def makeCheckBox(name, val=True):
return {
'name': name,
'type': 'bool',
'value': val,
#'tip': "This is a checkbox",
}
sigmaOpt = {'name': 'sigma', 'type': 'str', 'value': '[0.0, 1.0, 2.0, 4.0]' }
wardOpts = {'name': 'wardness', 'type': 'str', 'value': '[0.0, 0.1, 0.2]' }
filterChild = [
makeCheckBox("computeFilter"),
sigmaOpt,
{
'name':'UCM',
'children': [
makeCheckBox("ucmFilters"),
wardOpts,
{'name': 'meanSign', 'type': 'float', 'value': '1.0' }
]
}
]
params = [
{
'name' : "RawData",
'type' : 'group',
'children' : [
{
'name': 'Compute Features On Raw Data',
'type': 'bool',
'value': True,
'tip': "This is a checkbox",
},
{
'name' : "0-Order Filter",
'type' : 'group',
'children' : filterChild
},
{
'name' : "1-Order Filter",
'type' : 'group',
'children' : filterChild
},
{
'name' : "2-Order Filter",
'type' : 'group',
'children' : filterChild
}
]
},
#ComplexParameter(name='Custom parameter group (reciprocal values)'),
#ScalableGroup(name="Expandable Parameter Group", children=[
# {'name': 'ScalableParam 1', 'type': 'str', 'value': "default param 1"},
# {'name': 'ScalableParam 2', 'type': 'str', 'value': "default param 2"},
#]),
]
## Create tree of Parameter objects
self.p = Parameter.create(name='params', type='group', children=params)
self.t = ParameterTree()
self.t.setParameters(self.p, showTop=False)
self.layout.addWidget(self.t)
self.layout.addWidget(self.buttonBox)
## If anything changes in the tree, print a message
def change(param, changes):
print("tree changes:")
for param, change, data in changes:
path = self.p.childPath(param)
if path is not None:
childName = '.'.join(path)
else:
childName = param.name()
print(' parameter: %s'% childName)
print(' change: %s'% change)
print(' data: %s'% str(data))
print(' ----------')
self.p.sigTreeStateChanged.connect(change)
def onPressAccepted(self):
self.hide()
self.viewer.onClickedComputeFeaturesImpl(self.p)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Escape:
self.hide()
event.accept()
else:
super(QtGui.QDialog, self).keyPressEvent(event)
|
MaayanLab/clustergrammer-widget
|
clustergrammer_widget/clustergrammer/load_data.py
|
Python
|
mit
| 2,352
| 0.022109
|
import io, sys
import json
import pandas as pd
from . import categories
from . import proc_df_labels
from . import data_formats
from . import make_unique_labels
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def load_file(net, filename):
# reset network when loading file, prevents errors when loading new file
# have persistent categories
# trying to improve re-initialization
# net.__init__()
net.reset()
f = open(filename, 'r')
file_string = f.read()
f.close()
load_file_as_string(net, file_string, filename)
def load_file_as_string(net, file_string, filename=''):
if (sys.version_info > (3, 0)):
# python 3
####################
file_string = str(file_string)
else:
# python 2
####################
file_string = unicode(file_string)
buff = io.StringIO(file_string)
if '/' in filename:
filename = filename.split('/')[-1]
net.load_tsv_to_net(buff, filename)
def load_stdin(net):
data = ''
for line in sys.stdin:
data = data + line
data = StringIO.StringIO(data)
net.load_tsv_to_net(data)
def load_tsv_to_net(net, file_buffer, filename=None):
lines = file_buffer.getvalue().split('\n')
num_labels = categories.check_categories(lines)
row_arr = list(range(num_labels['row']))
col_arr = list(range(num_labels['col']))
tmp_df = {}
# use header if there are col categories
if len(col_arr) > 1:
tmp_df['mat'] = pd.read_table(file_buffer, index_col=row_arr,
header=col_arr)
else:
tmp_df['mat'] = pd.read_table(file_buffer, index_col=row_arr)
tmp_df = proc_df_labels.main(tmp_df)
net.df_to_dat(tmp_df, True)
net.dat['filename'] = filename
def load_json_to_dict(filename):
f = open(filename, 'r')
inst_dict = json.load(f)
f.close()
return inst_dict
def load_gmt(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
gmt = {}
for i in range(len(lines)):
inst_line = lines[i].rstrip()
inst_term = inst_line.split('\t')[0]
inst_elems = inst_line.split('\t')[2:]
gmt[inst_term] = inst_elems
return gmt
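# --- illustrative note (not part of the original module) ---------------------
# load_gmt() expects the usual GMT layout: one gene set per line, tab
# separated, with the set name first, a description second (which is skipped)
# and the member genes afterwards.  A file containing the single line
#     apoptosis<TAB>curated<TAB>TP53<TAB>BAX<TAB>CASP3
# therefore yields {'apoptosis': ['TP53', 'BAX', 'CASP3']}.
# ------------------------------------------------------------------------------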
def load_data_to_net(net, inst_net):
''' load data into nodes and mat, also convert mat to numpy array'''
net.dat['nodes'] = inst_net['nodes']
net.dat['mat'] = inst_net['mat']
data_formats.mat_to_numpy_arr(net)
|
g2p/xtraceback
|
xtraceback/tracebackcompat.py
|
Python
|
mit
| 3,453
| 0.001158
|
import functools
import sys
import traceback
from stacked import Stacked
from .xtraceback import XTraceback
class TracebackCompat(Stacked):
"""
A context manager that patches the stdlib traceback module
Functions in the traceback module that exist as a method of this class are
replaced with equivalents that use XTraceback.
:cvar NOPRINT: Exception types that we don't print for (includes None)
:type NOPRINT: tuple
:ivar defaults: Default options to apply to XTracebacks created by this
instance
:type defaults: dict
"""
NOPRINT = (None, KeyboardInterrupt)
def __init__(self, **defaults):
super(TracebackCompat, self).__init__()
self.defaults = defaults
# register patches for methods that wrap traceback functions
for key in dir(traceback):
if hasattr(self, key):
self._register_patch(traceback, key, getattr(self, key))
#def __exit__(self, etype, evalue, tb):
#if etype not in self.NOPRINT:
#self.print_exception(etype, evalue, tb)
#super(TracebackCompat, self).__exit__(etype, evalue, tb)
def _factory(self, etype, value, tb, limit=None, **options):
options["limit"] = \
getattr(sys, "tracebacklimit", None) if limit is None else limit
_options = self.defaults.copy()
_options.update(options)
return XTraceback(etype, value, tb, **_options)
def _print_factory(self, etype, value, tb, limit=None, file=None,
**options):
# late binding here may cause problems where there is no sys i.e. on
# google app engine but it is required for cases where sys.stderr is
# rebound i.e. under nose
if file is None and hasattr(sys, "stderr"):
file = sys.stderr
options["stream"] = file
return self._factory(etype, value, tb, limit, **options)
@functools.wraps(traceback.format_tb)
def format_tb(self, tb, limit=None, **options):
xtb = self._factory(None, None, tb, limit, **options)
return xtb.format_tb()
@functools.wraps(traceback.format_exception_only)
def format_exception_only(self, etype, value, **options):
xtb = self._factory(etype, value, None, **options)
return xtb.format_exception_only()
@functools.wraps(traceback.format_exception)
def format_exception(self, etype, value, tb, limit=None, **options):
xtb = self._factory(etype, value, tb, limit, **options)
return xtb.format_exception()
@functools.wraps(traceback.format_exc)
def format_exc(self, limit=None, **options):
options["limit"] = limit
return "".join(self.format_exception(*sys.exc_info(), **options))
@functools.wraps(traceback.print_tb)
def print_tb(self, tb, limit=None, file=None, **options):
xtb = self._print_factory(None, None, tb, limit, file, **options)
xtb.print_tb()
@functools.wraps(traceback.print_exception)
def print_exception(self, etype, value, tb, limit=None, file=None,
**options):
xtb = self._print_factory(etype, value, tb, limit, file, **options)
xtb.print_exception()
@functools.wraps(traceback.print_exc)
def print_exc(self, limit=None, file=None, **options):
options["limit"] = limit
options["file"] = file
self.print_exception(*sys.exc_info(), **options)
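# --- illustrative usage sketch (not part of the original module) -------------
# Per the class docstring, TracebackCompat is a context manager: while it is
# active the registered traceback functions are patched, so ordinary stdlib
# calls render through XTraceback.  Hypothetical caller (import path assumed
# from this file's location):
#
#     from xtraceback.tracebackcompat import TracebackCompat
#     with TracebackCompat():
#         try:
#             1 / 0
#         except ZeroDivisionError:
#             traceback.print_exc()  # rendered by XTraceback while patched
# ------------------------------------------------------------------------------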
|
chromium/chromium
|
third_party/tensorflow-text/src/tensorflow_text/python/ops/regex_split_ops.py
|
Python
|
bsd-3-clause
| 10,403
| 0.003557
|
# coding=utf-8
# Copyright 2021 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""This file contains the python libraries for the regex_split op."""
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
gen_regex_split_ops = load_library.load_op_library(resource_loader.get_path_to_datafile('_regex_split_ops.so'))
from tensorflow_text.python.ops import splitter
# pylint: disable= redefined-builtin
def regex_split_with_offsets(input,
delim_regex_pattern,
keep_delim_regex_pattern="",
name=None):
r"""Split `input` by delimiters that match a regex pattern; returns offsets.
`regex_split_with_offsets` will split `input` using delimiters that match a
regex pattern in `delim_regex_pattern`. It will return three tensors:
one containing the split substrings ('result' in the examples below), one
containing the offsets of the starts of each substring ('begin' in the
examples below), and one containing the offsets of the ends of each substring
('end' in the examples below).
Here is an example:
>>> text_input=["hello there"]
>>> # split by whitespace
>>> result, begin, end = regex_split_with_offsets(input=text_input,
... delim_regex_pattern="\s")
>>> print("result: %s\nbegin: %s\nend: %s" % (result, begin, end))
result: <tf.RaggedTensor [[b'hello', b'there']]>
begin: <tf.RaggedTensor [[0, 6]]>
end: <tf.RaggedTensor [[5, 11]]>
By default, delimiters are not included in the split string results.
Delimiters may be included by specifying a regex pattern
`keep_delim_regex_pattern`. For example:
>>> text_input=["hello there"]
>>> # split by whitespace
>>> result, begin, end = regex_split_with_offsets(input=text_input,
... delim_regex_pattern="\s",
... keep_delim_regex_pattern="\s")
>>> print("result: %s\nbegin: %s\nend: %s" % (result, begin, end))
result: <tf.RaggedTensor [[b'hello', b' ', b'there']]>
begin: <tf.RaggedTensor [[0, 5, 6]]>
end: <tf.RaggedTensor [[5, 6, 11]]>
If there are multiple delimiters in a row, there are no empty splits emitted.
For example:
>>> text_input=["hello there"] # Note the two spaces between the words.
>>> # split by whitespace
>>> result, begin, end = regex_split_with_offsets(input=text_input,
... delim_regex_pattern="\s")
>>> print("result: %s\nbegin: %s\nend: %s" % (result, begin, end))
result: <tf.RaggedTensor [[b'hello', b'there']]>
begin: <tf.RaggedTensor [[0, 7]]>
end: <tf.RaggedTensor [[5, 12]]>
See https://github.com/google/re2/wiki/Syntax for the full list of supported
expressions.
Args:
input: A Tensor or RaggedTensor of string input.
delim_regex_pattern: A string containing the regex pattern of a delimiter.
keep_delim_regex_pattern: (optional) Regex pattern of delimiters that should
be kept in the result.
name: (optional) Name of the op.
Returns:
A tuple of RaggedTensors containing:
(split_results, begin_offsets, end_offsets)
where tokens is of type string, begin_offsets and end_offsets are of type
int64.
"""
# Convert input to ragged or tensor
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
input, dtype=dtypes.string)
# Handle RaggedTensor inputs by recursively processing the `flat_values`.
if ragged_tensor.is_ragged(input):
# Split the `flat_values` of the input.
tokens, begin_offsets, end_offsets = regex_split_with_offsets(
input.flat_values, delim_regex_pattern, keep_delim_regex_pattern, name)
# Copy outer dimension partitions from `input` to the output tensors.
tokens_rt = input.with_flat_values(tokens)
begin_offsets_rt = input.with_flat_values(begin_offsets)
end_offsets_rt = input.with_flat_values(end_offsets)
return tokens_rt, begin_offsets_rt, end_offsets_rt
delim_regex_pattern = b"".join(
[b"(", delim_regex_pattern.encode("utf-8"), b")"])
keep_delim_regex_pattern = b"".join(
[b"(", keep_delim_regex_pattern.encode("utf-8"), b")"])
# reshape to a flat Tensor (if not already)
input_shape = math_ops.cast(array_ops.shape(input), dtypes.int64)
input_reshaped = array_ops.reshape(input, [-1])
# send flat_values to regex_split op.
tokens, begin_offsets, end_offsets, row_splits = (
gen_regex_split_ops.regex_split_with_offsets(input_reshaped,
delim_regex_pattern,
keep_delim_regex_pattern))
# Pack back into ragged tensors
tokens_rt = ragged_tensor.RaggedTensor.from_row_splits(
tokens, row_splits=row_splits)
begin_offsets_rt = ragged_tensor.RaggedTensor.from_row_splits(
begin_offsets,
row_splits=row_splits)
end_offsets_rt = ragged_tensor.RaggedTensor.from_row_splits(
end_offsets, row_splits=row_splits)
# If the original input was a multi-dimensional Tensor, add back the
# dimensions
static_rank = input.get_shape().ndims
if static_rank is not None and static_rank > 1:
i = array_ops.get_positive_axis(-1, input.get_shape().ndims)
for i in range(
array_ops.get_positive_axis(-1,
input.get_shape().ndims), 0, -1):
tokens_rt = ragged_tensor.RaggedTensor.from_uniform_row_length(
values=tokens_rt, uniform_row_length=input_shape[i])
begin_offsets_rt = ragged_tensor.RaggedTensor.from_uniform_row_length(
values=begin_offsets_rt, uniform_row_length=input_shape[i])
end_offsets_rt = ragged_tensor.RaggedTensor.from_uniform_row_length(
values=end_offsets_rt, uniform_row_length=input_shape[i])
return tokens_rt, begin_offsets_rt, end_offsets_rt
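# --- illustrative note (not part of the original module) ---------------------
# The kernel returns flat tokens plus `row_splits`, and
# RaggedTensor.from_row_splits() regroups them so that row i holds the tokens
# of input string i.  For example tokens=[b'hello', b'there', b'hi'] with
# row_splits=[0, 2, 3] becomes [[b'hello', b'there'], [b'hi']]: two tokens for
# the first input string, one for the second.
# ------------------------------------------------------------------------------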
# pylint: disable= redefined-builtin
def regex_split(input,
delim_regex_pattern,
keep_delim_regex_pattern="",
name=None):
r"""Split `input` by delimiters that match a regex pattern.
`regex_split` will split `input` using delimiters that match a
regex pattern in `delim_regex_pattern`. Here is an example:
>>> text_input=["hello there"]
>>> # split by whitespace
>>> regex_split(input=text_input,
... delim_regex_pattern="\s")
<tf.RaggedTensor [[b'hello', b'there']]>
By default, delimiters are not included in the split string results.
Delimiters may be included by specifying a regex pattern
`keep_delim_regex_pattern`. For example:
>>> text_input=["hello there"]
>>> # split by whitespace
>>> regex_split(input=text_input,
... delim_regex_pattern="\s",
... keep_delim_regex_pattern="\s")
<tf.RaggedTensor [[b'hello', b' ', b'there']]>
If there are multiple delimiters in a row, there are no empty splits emitted.
For example:
>>> text_input=["hello there"] # Note the two spaces between the words.
>>> # split by whitespace
>>> regex_split(input=text_input,
... delim_regex_pattern="\s")
<tf.RaggedTensor [[b'hello', b'there']]>
See https://github.com/google/re2/wiki/Syntax for the full list of supported
expressions.
Args:
input: A Tensor or RaggedTensor of string input.
delim_regex_pattern: A string containing the regex pattern of a de
|
dandesousa/lapis
|
tests/test_slugs.py
|
Python
|
cc0-1.0
| 2,165
| 0.000924
|
#!/usr/bin/env python
# encoding: utf-8
import os
import tempfile
import unittest
class TestSlug(unittest.TestCase):
"""tests features related to creating slugs"""
def setUp(self):
self.tempd_path = tempfile.mkdtemp()
def tearDown(self):
import shutil
shutil.rmtree(self.tempd_path)
def test_title(self):
from lapis.slug import slugify
slug = slugify("The World's Greatest Title")
self.assertTrue("the-world's-greatest-title", slug)
def test_unique_slug_with_date(self):
from lapis.slug import unique_path_and_slug
from lapis.slug import slugify
from lapis.formats import default_format
from datetime import datetime
title = "My Unique Title"
path, slug = unique_path_and_slug(title, self.tempd_path, date=datetime.now())
expected_fn = "{}-{}.{}".format(datetime.now().strftime("%Y-%m-%d"), slugify(title), default_format.extension)
self.assertEqual(expected_fn, os.path.basename(path))
def test_unique_path_and_slug_single(self):
from lapis.slug import unique_path_and_slug
from lapis.slug import slugify
from lapis.formats import default_format
title = "My Unique Title"
path, slug = unique_path_and_slug(title, self.tempd_path)
self.assertEqual(os.path.dirname(path), self.tempd_path)
expected_fn = slugify(title) + "." + default_format.extension
self.assertEqual(expected_fn, os.path.basename(path))
self.assertEqual(slug, slugify(title))
def test_unique_path_and_slug_existing(self):
from lapis.slug import unique_path_and_slug
from lapis.slug import slugify
title = "My Unique Title"
path, slug = unique_path_and_slug(title, self.tempd_path)
self.assertEqual(slug, slugify(title))
prev_slugs = [slug]
for i in range(100):
with open(path, "w"):
pass
path, slug = unique_path_and_slug(title, self.tempd_path)
self.assertNotIn(slug, prev_slugs)
prev_slugs.append(slug)
self.assertFalse(os.path.exists(path))
|
comic/comic-django
|
app/grandchallenge/github/migrations/0004_auto_20210916_0746.py
|
Python
|
apache-2.0
| 769
| 0
|
# Generated by Django 3.1.13 on 2021-09-16 07:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("github", "0003_githubusertoken"),
]
operations = [
migrations.AddField(
model_name="githubwebhookmessage",
name="error",
field=models.TextField(blank=True),
),
migrations.AddField(
model_name="githubwebhookmessage",
name="has_open_source_license",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="githubwebhookmessage",
name="license_check_result",
field=models.CharField(blank=True, max_length=1024),
),
]
|
harsha5500/pytelegrambot
|
bots/doloresBot.py
|
Python
|
gpl-3.0
| 3,494
| 0.002862
|
__author__ = 'harsha'
import telegram_methods.getMe
import telegram_methods.getUpdates
import telegram_methods.sendMessage
import telegram.Update
import telegram.Message
import telegram.User
import telegram.GroupChat
import bot_utilities.tgtwitter
import re
base_url = "https://api.tele
|
gram.org/bot"
auth_file_name = "../bots/doloresBot.auth"
auth_file = open(auth_file_name, 'r')
auth_token = auth_file.readline()
consumer_key = auth_file.readline()
consumer_secret = auth_file.readline()
access_token = auth_file.readline()
access_token_secret = auth_file.readline()
auth_file.close()
# Remove the newline at end of file.
auth_token = auth_token[:-1]
consumer_key = consumer_key[:-1]
print(consumer_key)
consumer_secret = consumer_secret[:-1]
print(consumer_secret)
access_token = access_token[:-1]
print(access_token)
access_token_secret = access_token_secret[:-1]
print(access_token_secret)
cfg = {
"consumer_key": consumer_key,
"consumer_secret": consumer_secret,
"access_token": access_token,
"access_token_secret": access_token_secret
}
bot_generic_message = "Hello, I am Dead End Dolores. I like turning left at dead ends and Harry Potter."
tejas_message = "how how"
my_bot = base_url + auth_token
# On first run last update was 0
last_update_id = 0
first_run = True
# Find the chat id of the conversation
def setChatID(chat_obj):
# chat_obj = message_obj.get_chat()
if type(chat_obj) == telegram.GroupChat.GroupChat:
reply_chat_id = chat_obj.get_user_id()
else:
reply_chat_id = chat_obj.get_id()
return reply_chat_id
while True:
updates = telegram_methods.getUpdates.getUpdates(my_bot, last_update_id, None, None)
# print(len(updates), "Last ID:" + str(last_update_id))
updateObj = None
messageObj = None
if len(updates) > 0:
updateObj = updates[0]
messageObj = updateObj.get_message()
# Ignore all old messages.
if first_run:
print(updates)
if len(updates) > 0:
last_update_id = updateObj.get_update_id() + 1
else:
last_update_id += 1
first_run = False
elif len(updates) == 0:
continue
else:
for count in range(0, len(updates)):
updateObj = updates[0]
messageObj = updateObj.get_message()
chatObj = messageObj.get_chat()
chat_id = setChatID(chatObj)
# Debugging start
# print(messageObj.get_text())
# print(messageObj)
# Debugging end
if messageObj.get_text() == "/dolores":
telegram_methods.sendMessage.send_message(my_bot, chat_id, bot_generic_message)
if messageObj.get_text() == "/tejas":
telegram_methods.sendMessage.send_message(my_bot, chat_id, tejas_message)
if "/tweet" in messageObj.get_text() and last_update_id > 0:
tweet_message = re.sub('\/tweet', '', messageObj.get_text())
tweet_message = tweet_message[:140]
if tweet_message == "":
# print("Empty Message")
telegram_methods.sendMessage.send_message(my_bot, chat_id, "Empty tweet?")
continue
else:
status = bot_utilities.tgtwitter.tweet(cfg, tweet_message)
telegram_methods.sendMessage.send_message(my_bot, chat_id, "Tweeted:" + tweet_message)
last_update_id = updateObj.get_update_id() + 1
|
galtay/data_sci_ale
|
code_kata_04/kata_04.py
|
Python
|
gpl-3.0
| 2,540
| 0.000787
|
import pandas
WEATHER_FNAME = 'weather.dat'
FOOTBALL_FNAME = 'football.dat'
def read_weather(fname=WEATHER_FNAME):
"""Read the weather file into a DataFrame and return it.
Pandas has many input routines (all prefixed with "read")
- http://pandas.pydata.org/pandas-docs/stable/io.html
Examining the weather.dat file we see that it has 17 columns. This file
might look like it's white space delimited but no! This, my friends, is
a fixed width file. Although Pandas allows arbitrary regular expressions
for delimiter values (for example we could use "\s+" for one or more white
spaces) there are some columns that have no values and this would break.
For example, the column HDDay has no values until the 9th row. Using "one
or more white spaces" as the delimiter would make 53.8 the value for HDDay
in the first row.
The function that we want is pandas.read_fwf (for fixed width file). It
turns out that pandas.read_fwf is *almost* smart enough to automatically
determine the widths of the columns. In the end we need to specify them
to get the last columns read correctly.
"""
# things I tried that don't work
# 1) df = pandas.read_csv(fname)
# 2) df = pandas.read_csv(fname, delimiter=' ')
# 3) df = pandas.read_csv(fname, delimiter='\s+')
# 4) df = pandas.read_fwf(fname)
df = pandas.read_fwf(
fname, widths=[4, 6, 6, 6, 7, 6, 5, 6, 6, 6, 5, 4, 4, 4, 4, 4, 6])
# we still have a row on top full of NaN because there was a blank line
# just below the header. we could use dropna(axis=0, how='all') but that
# would also drop any rows that happen to be empty in the middle of the
# data. instead we can simply use drop(0) which is the label of the row.
# also note that almost every pandas operation returns a new object and
# doesn't operate in place so we assign the results to df.
df = df.drop(0)
return df
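# --- illustrative sketch (not part of the original kata) ---------------------
# read_fwf slices every line at fixed character offsets, which is what lets
# the empty HDDay cells survive.  A tiny in-memory demonstration of the same
# idea (the column widths below are made up for the example):
#
#     import io
#     sample = ("Dy MxT MnT\n"
#               " 1  88  59\n"
#               " 2  79    \n")   # MnT missing on day 2
#     demo_df = pandas.read_fwf(io.StringIO(sample), widths=[2, 4, 4])
#     # demo_df['MnT'] -> [59.0, NaN]; a whitespace delimiter would have
#     # mis-aligned the second row instead of reporting a missing value.
# ------------------------------------------------------------------------------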
if __name__ == '__main__':
# I usually use a naming convention that appends "_df" to DataFrames
weather_df = read_weather()
# Pandas guesses the types for each column. "object" is a native python
# string and what Pandas defaults to when it can't guess.
print(weather_df.dtypes)
print()
# you can index columns by passing a string or a list of strings
# into the square bracket operator
print(weather_df['WxType'])
print
print(weather_df[['HDDay', 'AvSLP']])
print
# "loc" and "iloc" are ways to index into the DataFrame
|
fpeyre/shinken
|
shinken/daemons/brokerdaemon.py
|
Python
|
agpl-3.0
| 33,165
| 0.002503
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import traceback
import cPickle
import base64
import zlib
import threading
from multiprocessing import active_children
from shinken.satellite import BaseSatellite
from shinken.property import PathProp, IntegerProp
from shinken.util import sort_by_ids, get_memory
from shinken.log import logger
from shinken.stats import statsmgr
from shinken.external_command import ExternalCommand
from shinken.http_client import HTTPClient, HTTPExceptions
from shinken.daemon import Daemon, Interface
class IStats(Interface):
"""
Interface for various stats about broker activity
"""
doc = 'Get raw stats from the daemon'
def get_raw_stats(self):
app = self.app
res = []
insts = [inst for inst in app.modules_manager.instances if inst.is_external]
for inst in insts:
try:
res.append({'module_name': inst.get_name(), 'queue_size': inst.to_q.qsize()})
except Exception, exp:
res.append({'module_name': inst.get_name(), 'queue_size': 0})
return res
get_raw_stats.doc = doc
# Our main APP class
class Broker(BaseSatellite):
properties = BaseSatellite.properties.copy()
properties.update({
'pidfile': PathProp(default='brokerd.pid'),
'port': IntegerProp(default=7772),
'local_log': PathProp(default='brokerd.log'),
})
def __init__(self, config_file, is_daemon, do_replace, debug, debug_file, profile=''):
super(Broker, self).__init__('broker', config_file, is_daemon, do_replace, debug,
debug_file)
# Our arbiters
self.arbiters = {}
# Our pollers, reactionners and receivers
self.pollers = {}
self.reactionners = {}
self.receivers = {}
# Modules are load one time
self.have_modules = False
# Can have a queue of external_commands given by modules
# will be processed by arbiter
self.external_commands = []
# All broks to manage
self.broks = [] # broks to manage
# broks raised this turn and that needs to be put in self.broks
self.broks_internal_raised = []
# broks raised by the arbiters, we need a lock so the push can be in parallel
# to our current activities and won't lock the arbiter
self.arbiter_broks = []
self.arbiter_broks_lock = threading.RLock()
self.timeout = 1.0
self.istats = IStats(self)
# Schedulers have some queues. We can simplify the call by adding
# elements into the proper queue just by looking at their type
# Brok -> self.broks
# TODO: better tag ID?
# External commands -> self.external_commands
def add(self, elt):
cls_type = elt.__class__.my_type
if cls_type == 'brok':
# For brok, we TAG brok with our instance_id
elt.instance_id = 0
self.broks_internal_raised.append(elt)
return
elif cls_type == 'externalcommand':
logger.debug("Enqueuing an external command '%s'", str(ExternalCommand.__dict__))
self.external_commands.append(elt)
# Maybe we got a Message from the modules, it's a way to ask for something,
# like full data from a scheduler, for example.
elif cls_type == 'message':
# We got a message, great!
logger.debug(str(elt.__dict__))
if elt.get_type() == 'NeedData':
data = elt.get_data()
# Full instance id means: I got no data for this scheduler
# so give me all dumbass!
if 'full_instance_id' in data:
c_id = data['full_instance_id']
source = elt.source
logger.info('The module %s is asking me to get all initial data '
'from the scheduler %d',
source, c_id)
# so we just reset the connection and the running_id,
# it will just get all new things
try:
self.schedulers[c_id]['con'] = None
self.schedulers[c_id]['running_id'] = 0
except KeyError: # maybe this instance was not known, forget it
logger.warning("the module %s ask me a full_instance_id "
"for an unknown ID (%d)!", source, c_id)
# Maybe a module tells me that it's dead, I must log its last words...
if elt.get_type() == 'ICrash':
data = elt.get_data()
logger.error('the module %s just crashed! Please look at the traceback:',
data['name'])
logger.error(data['trace'])
# The module death will be looked for elsewhere and restarted.
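# --- illustrative sketch (not part of the original daemon) -------------------
# add() above routes incoming objects purely on their class-level `my_type`
# tag: broks get stamped with our instance_id and queued, external commands
# are appended, and Message objects are inspected further.  A minimal
# standalone version of that dispatch idea (the names here are hypothetical):
def _sketch_route(elt, broks, external_commands):
    cls_type = elt.__class__.my_type
    if cls_type == 'brok':
        broks.append(elt)  # add() would also tag elt.instance_id here
    elif cls_type == 'externalcommand':
        external_commands.append(elt)
    # 'message' objects would get their own handling, as in add() above
# ------------------------------------------------------------------------------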
# Get the good tabs for links by the kind. If unknown, return None
def get_links_from_type(self, d_type):
t = {'scheduler': self.schedulers,
'arbiter': self.arbiters,
'poller': self.pollers,
'reactionner': self.reactionners,
'receiver': self.receivers
}
if d_type in t:
return t[d_type]
return None
# Check that we do not connect too often to this
def is_connection_try_too_close(self, elt):
now = time.time()
last_connection = elt['last_connection']
if now - last_connection < 5:
return True
return False
# wrapper function for the real function do_
# just for timing the connection
def pynag_con_init(self, id, type='scheduler'):
_t = time.time()
r = self.do_pynag_con_init(id, type)
statsmgr.timing('con-init.%s' % type, time.time() - _t, 'perf')
return r
# initialize or re-initialize connection with scheduler or
# arbiter if type == arbiter
def do_pynag_con_init(self, id, type='scheduler'):
# Get the good links tab for looping..
links = self.get_links_from_type(type)
if links is None:
logger.debug('Type unknown for connection! %s', type)
return
# default timeout for daemons like pollers/reactionners/...
timeout = 3
data_timeout = 120
if type == 'scheduler':
# If sched is not active, I do not try to init
# it is just useless
is_active = links[id]['active']
if not is_active:
return
# schedulers also got real timeout to respect
timeout = links[id]['timeout']
data_timeout = links[id]['data_timeout']
# If we try to connect too much, we slow down our tests
if self.is_connection_try_too_close(links[id]):
return
# Ok, we can now update it
links[id]['last_connection'] = time.time()
# DBG: print "Init connection with", links[id]['uri']
running_id = links[id]['running_id']
# DBG: print "Running id before connection", running_id
uri = links[id]['uri']
try:
con = links[id]['con'] = HTTPClient(uri=uri,
|
bauerj/electrum-server
|
src/storage.py
|
Python
|
mit
| 22,202
| 0.003288
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import plyvel
import ast
import hashlib
import os
import sys
import threading
from processor import print_log, logger
from utils import bc_address_to_hash_160, hash_160_to_pubkey_address, Hash, \
bytes8_to_int, bytes4_to_int, int_to_bytes8, \
int_to_hex8, int_to_bytes4, int_to_hex4
"""
Patricia tree for hashing unspents
"""
# increase this when database needs to be updated
global GENESIS_HASH
GENESIS_HASH = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
DB_VERSION = 3
KEYLENGTH = 56 # 20 + 32 + 4
class Node(object):
def __init__(self, s):
self.k = int(s[0:32].encode('hex'), 16)
self.s = s[32:]
if self.k==0 and self.s:
print "init error", len(self.s), "0x%0.64X" % self.k
raise BaseException("z")
def serialized(self):
k = "0x%0.64X" % self.k
k = k[2:].decode('hex')
assert len(k) == 32
return k + self.s
def has(self, c):
return (self.k & (1<<(ord(c)))) != 0
def is_singleton(self, key):
assert self.s != ''
return len(self.s) == 40
def get_singleton(self):
for i in xrange(256):
if self.k == (1<<i):
return chr(i)
raise BaseException("get_singleton")
def indexof(self, c):
assert self.k != 0 or self.s == ''
x = 0
for i in xrange(ord(c)):
if (self.k & (1<<i)) != 0:
x += 40
return x
def get(self, c):
x = self.indexof(c)
ss = self.s[x:x+40]
_hash = ss[0:32]
value = bytes8_to_int(ss[32:40])
return _hash, value
def set(self, c, h, value):
if h is None:
h = chr(0)*32
vv = int_to_bytes8(value)
item = h + vv
assert len(item) == 40
if self.has(c):
self.remove(c)
x = self.indexof(c)
self.s = self.s[0:x] + item + self.s[x:]
self.k |= (1<<ord(c)
|
)
assert self.k != 0
def remove(self, c):
x = self.indexof(c)
self.k &= ~(1<<ord(c))
self.s = self.s[0:x] + self.s[x+40:]
def get_hash(self, x, parent):
if x:
assert self.k != 0
skip_string = x[len(parent)+1:] if x != '' else ''
x = 0
v = 0
hh = ''
for i in xrange(256):
if (self.k&(1<<i)) != 0:
ss = self.s[x:x+40]
hh += ss[0:32]
v += bytes8_to_int(ss[32:40])
x += 40
try:
_hash = Hash(skip_string + hh)
except:
_hash = None
if x:
assert self.k != 0
return _hash, v
@classmethod
def from_dict(klass, d):
k = 0
s = ''
for i in xrange(256):
if chr(i) in d:
k += 1<<i
h, value = d[chr(i)]
if h is None: h = chr(0)*32
vv = int_to_bytes8(value)
item = h + vv
assert len(item) == 40
s += item
k = "0x%0.64X" % k # 32 bytes
k = k[2:].decode('hex')
assert len(k) == 32
out = k + s
return Node(out)
class DB(object):
def __init__(self, path, name, cache_size):
self.db = plyvel.DB(os.path.join(path, name), create_if_missing=True, compression=None, lru_cache_size=cache_size)
self.batch = self.db.write_batch()
self.cache = {}
self.lock = threading.Lock()
def put(self, key, s):
self.batch.put(key, s)
self.cache[key] = s
def get(self, key):
s = self.cache.get(key)
if s == 'deleted':
return None
if s is None:
with self.lock:
s = self.db.get(key)
return s
def delete(self, key):
self.batch.delete(key)
self.cache[key] = 'deleted'
def close(self):
self.db.close()
def write(self):
with self.lock:
self.batch.write()
self.batch.clear()
self.cache.clear()
def get_next(self, key):
with self.lock:
i = self.db.iterator(start=key)
k, _ = i.next()
return k
class Storage(object):
def __init__(self, config, shared, test_reorgs):
self.shared = shared
self.hash_list = {}
self.parents = {}
self.skip_batch = {}
self.test_reorgs = test_reorgs
# init path
self.dbpath = config.get('leveldb', 'path')
if not os.path.exists(self.dbpath):
os.mkdir(self.dbpath)
try:
self.db_utxo = DB(self.dbpath, 'utxo', config.getint('leveldb', 'utxo_cache'))
self.db_hist = DB(self.dbpath, 'hist', config.getint('leveldb', 'hist_cache'))
self.db_addr = DB(self.dbpath, 'addr', config.getint('leveldb', 'addr_cache'))
self.db_undo = DB(self.dbpath, 'undo', None)
except:
logger.error('db init', exc_info=True)
self.shared.stop()
try:
self.last_hash, self.height, db_version = ast.literal_eval(self.db_undo.get('height'))
except:
print_log('Initializing database')
self.height = 0
self.last_hash = GENESIS_HASH
self.pruning_limit = config.getint('leveldb', 'pruning_limit')
db_version = DB_VERSION
self.put_node('', Node.from_dict({}))
# check version
if db_version != DB_VERSION:
print_log("Your database '%s' is deprecated. Please create a new database"%self.dbpath)
self.shared.stop()
return
# pruning limit
try:
self.pruning_limit = ast.literal_eval(self.db_undo.get('limit'))
except:
self.pruning_limit = config.getint('leveldb', 'pruning_limit')
self.db_undo.put('version', repr(self.pruning_limit))
# compute root hash
root_node = self.get_node('')
self.root_hash, coins = root_node.get_hash('', None)
# print stuff
print_log("Database version %d."%db_version)
print_log("Pruning limit for spent outputs is %d."%self.pruning_limit)
print_log("Blockchain height", self.height)
print_log("UTXO tree root hash:", self.root_hash.encode('hex'))
print_log("Coins in database:", coins)
# convert between bitcoin addresses and 20 bytes keys used for storage.
@staticmethod
def address_to_key(addr):
return bc_address_to_hash_160(addr)
def get_skip(self, key):
o = self.skip_batch.get(key)
if o is not None:
return o
k = self.db_utxo.get_next(key)
assert k.startswith(key)
return k[len(key):]
def set_skip(self, key, skip):
self.skip_batch[key] = skip
def get_proof(self, addr):
key = self.address_to_key(addr)
k = self.db_utxo.get_next(key)
p = self.get_path(k)
p.append(k)
out = []
for item in p:
v = sel
|
MayankAgarwal/euler_py
|
024/euler024.py
|
Python
|
mit
| 991
| 0.009082
|
def __init_break_indices__(num):
indices = [1]*num
for i in xrange(1, num):
indices[num-1-i] = indices[num-i]*i
return indices
def get_lex_permutation (N, base_string, break_indices):
stringPermutation = []
divident = []
base_string = list(base_string)
divident_prefix = 0
for i in xrange(len(base_string)):
if (i-1) >= 0:
temp = ( N - 1 - divident_prefix )/break_indices[i]
else:
temp = ( N - 1 )/break_indices[i]
temp_int = int(temp)
stringPermutation.append(base_string[temp_int])
base_string.pop(temp_int)
divident_prefix += temp_int*break_indices[i]
return ''.join(stringPermutation)
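# --- illustrative worked example (not part of the original solution) ---------
# break_indices holds falling factorials: for a 3-character base string it is
# [2, 1, 1], meaning 2! permutations start with each possible first letter.
# For N = 4 and base "abc": (4 - 1) / 2 = 1 selects 'b', the remainder 1 then
# picks 'c', leaving 'a', so the 4th lexicographic permutation is "bca"
# (the ordering being abc, acb, bac, bca).
assert get_lex_permutation(4, "abc", __init_break_indices__(3)) == "bca"
# ------------------------------------------------------------------------------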
__STRING = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm']
__BREAK_INDICES = __init_break_indices__(len(__STRING))
tests = int(raw_input())
for test in xrange(tests):
N = int(raw_input())
print get_lex_permutation(N, __STRING, __BREAK_INDICES)
|
cslarsen/vev
|
vev/server_py3.py
|
Python
|
lgpl-2.1
| 180
| 0.011111
|
import http.server
import urllib.parse
class BaseServer(http.server.BaseHTTPRequestHandler):
pass
HTTPServer = http.server.HTTPServer
urllib_urlparse = urllib.parse.urlparse
|
ArthurZey/toyproblems
|
projecteuler/0007_10001st_prime.py
|
Python
|
mit
| 1,337
| 0.016455
|
#!/usr/bin/env python
'''
https://projecteuler.net/problem=7
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
What is the 10001st prime number?
'''
import math
def prime(n):
# we start with the knowledge of at least one prime
primes = [2]
# and the next possible prime is the next odd number
number_to_test = 3
# we're going to grow the primes list until we have the n-th prime
while len(primes) < n:
# start with the default assumption that number_to_test is prime
is_prime = True
# since, by construction, all the primes less than number_to_test have already been found,
# we need only test the possible_divisors in primes up to the square root of number_to_test
# to see if they divide number_to_test before confirming or disproving that number_to_test
# is indeed prime
for possible_divisor in primes:
if possible_divisor >= math.floor(math.sqrt(number_to_test)) + 1:
is_prime = True
break
if number_to_test%possible_divisor == 0:
is_prime = False
break
if is_prime:
primes.append(number_to_test)
# in any event, move on to the next candidate (the next odd number)
number_to_test += 2
# return the last prime
return primes[-1]
print(prime(10001))
|
awacha/cct
|
cct/core2/instrument/components/datareduction/__init__.py
|
Python
|
bsd-3-clause
| 115
| 0
|
from .datareduction import DataReduction
from .datareductionpipeline import DataReductionPipeLine, ProcessingError
|
csirtgadgets/csirtg-mail-py
|
csirtg_mail/constants.py
|
Python
|
lgpl-3.0
| 72
| 0
|
import
|
sys
PYVERSION = 2
if sys.version_info >
|
(3,):
PYVERSION = 3
|
jamesward-demo/air-quick-fix
|
AIRQuickFixServer/pyamf/tests/test_sol.py
|
Python
|
apache-2.0
| 6,453
| 0.004029
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2008 The PyAMF Project.
# See LICENSE for details.
"""
Tests for Local Shared Object (LSO) Implementation.
@author: U{Nick Joyce<mailto:nick@boxdesign.co.uk>}
@since: 0.1.0
"""
import unittest, os.path, warnings
import pyamf
from pyamf import sol
warnings.simplefilter('ignore', RuntimeWarning)
class DecoderTestCase(unittest.TestCase):
def test_header(self):
bytes = '\x00\xbf\x00\x00\x00\x15TCSO\x00\x04\x00\x00\x00\x00\x
|
00\x05hello\x00\x00\x00\x00'
try:
sol.decode(bytes)
except:
raise
self.fail("Error decoding stream")
def test_invalid_header(self):
bytes = '\x00\x00\x00\x00\x00\x15TCSO\x00\x04\x00\x00\x00\x00\x00\x05hello\x00\x00\x00\x00'
self.assertRaises
|
(pyamf.DecodeError, sol.decode, bytes)
def test_invalid_header_length(self):
bytes = '\x00\xbf\x00\x00\x00\x05TCSO\x00\x04\x00\x00\x00\x00\x00\x05hello\x00\x00\x00\x00'
self.assertRaises(pyamf.DecodeError, sol.decode, bytes)
def test_strict_header_length(self):
bytes = '\x00\xbf\x00\x00\x00\x00TCSO\x00\x04\x00\x00\x00\x00\x00\x05hello\x00\x00\x00\x00'
try:
sol.decode(bytes, strict=False)
except:
self.fail("Error occurred decoding stream")
def test_invalid_signature(self):
bytes = '\x00\xbf\x00\x00\x00\x15ABCD\x00\x04\x00\x00\x00\x00\x00\x05hello\x00\x00\x00\x00'
self.assertRaises(pyamf.DecodeError, sol.decode, bytes)
def test_invalid_header_name_length(self):
bytes = '\x00\xbf\x00\x00\x00\x15TCSO\x00\x04\x00\x00\x00\x00\x00\x01hello\x00\x00\x00\x00'
self.assertRaises(pyamf.DecodeError, sol.decode, bytes)
def test_invalid_header_padding(self):
bytes = '\x00\xbf\x00\x00\x00\x15TCSO\x00\x04\x00\x00\x00\x00\x00\x05hello\x00\x00\x01\x00'
self.assertRaises(pyamf.DecodeError, sol.decode, bytes)
def test_unknown_encoding(self):
bytes = '\x00\xbf\x00\x00\x00\x15TCSO\x00\x04\x00\x00\x00\x00\x00\x05hello\x00\x00\x00\x01'
self.assertRaises(ValueError, sol.decode, bytes)
def test_amf3(self):
bytes = '\x00\xbf\x00\x00\x00aTCSO\x00\x04\x00\x00\x00\x00\x00\x08' + \
'EchoTest\x00\x00\x00\x03\x0fhttpUri\x06=http://localhost:8000' + \
'/gateway/\x00\x0frtmpUri\x06+rtmp://localhost/echo\x00'
self.assertEquals(sol.decode(bytes), (u'EchoTest',
{u'httpUri': u'http://localhost:8000/gateway/', u'rtmpUri': u'rtmp://localhost/echo'}))
class EncoderTestCase(unittest.TestCase):
def test_encode_header(self):
stream = sol.encode('hello', {})
self.assertEquals(stream.getvalue(),
'\x00\xbf\x00\x00\x00\x15TCSO\x00\x04\x00\x00\x00\x00\x00\x05hello\x00\x00\x00\x00')
def test_multiple_values(self):
stream = sol.encode('hello', {'name': 'value', 'spam': 'eggs'})
self.assertEquals(stream.getvalue(), HelperTestCase.contents)
def test_amf3(self):
bytes = '\x00\xbf\x00\x00\x00aTCSO\x00\x04\x00\x00\x00\x00\x00\x08' + \
'EchoTest\x00\x00\x00\x03\x0fhttpUri\x06=http://localhost:8000' + \
'/gateway/\x00\x0frtmpUri\x06+rtmp://localhost/echo\x00'
stream = sol.encode(u'EchoTest',
{u'httpUri': u'http://localhost:8000/gateway/', u'rtmpUri': u'rtmp://localhost/echo'}, encoding=pyamf.AMF3)
self.assertEquals(stream.getvalue(), bytes)
class HelperTestCase(unittest.TestCase):
contents = '\x00\xbf\x00\x00\x002TCSO\x00\x04\x00\x00\x00\x00\x00\x05h' + \
'ello\x00\x00\x00\x00\x00\x04name\x02\x00\x05value\x00\x00\x04spam' + \
'\x02\x00\x04eggs\x00'
def setUp(self):
self.file_name = os.tempnam()
def tearDown(self):
if os.path.isfile(self.file_name):
os.unlink(self.file_name)
def _load(self):
fp = open(self.file_name, 'wb+')
fp.write(self.contents)
fp.flush()
return fp
def test_load_name(self):
fp = self._load()
fp.close()
s = sol.load(self.file_name)
self.assertEquals(s.name, 'hello')
self.assertEquals(s, {'name': 'value', 'spam': 'eggs'})
def test_load_file(self):
fp = self._load()
y = fp.tell()
fp.seek(0)
s = sol.load(fp)
self.assertEquals(s.name, 'hello')
self.assertEquals(s, {'name': 'value', 'spam': 'eggs'})
self.assertEquals(y, fp.tell())
def test_save_name(self):
s = sol.SOL('hello')
s.update({'name': 'value', 'spam': 'eggs'})
sol.save(s, self.file_name)
fp = open(self.file_name, 'rb')
try:
self.assertEquals(fp.read(), self.contents)
except:
fp.close()
raise
def test_save_file(self):
fp = open(self.file_name, 'wb+')
s = sol.SOL('hello')
s.update({'name': 'value', 'spam': 'eggs'})
sol.save(s, fp)
fp.seek(0)
self.assertFalse(fp.closed)
self.assertEquals(fp.read(), self.contents)
fp.close()
class SOLTestCase(unittest.TestCase):
def test_create(self):
s = sol.SOL('eggs')
self.assertEquals(s, {})
self.assertEquals(s.name, 'eggs')
def test_save(self):
s = sol.SOL('hello')
s.update({'name': 'value', 'spam': 'eggs'})
x = os.tempnam()
s.save(x)
try:
self.assertEquals(open(x, 'rb').read(), HelperTestCase.contents)
except:
if os.path.isfile(x):
os.unlink(x)
raise
x = os.tempnam()
fp = open(x, 'wb+')
self.assertEquals(fp.closed, False)
s.save(fp)
self.assertNotEquals(fp.tell(), 0)
fp.seek(0)
self.assertEquals(fp.read(), HelperTestCase.contents)
self.assertEquals(fp.closed, False)
try:
self.assertEquals(open(x, 'rb').read(), HelperTestCase.contents)
except:
if os.path.isfile(x):
os.unlink(x)
raise
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(EncoderTestCase))
suite.addTest(unittest.makeSuite(DecoderTestCase))
suite.addTest(unittest.makeSuite(HelperTestCase))
suite.addTest(unittest.makeSuite(SOLTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
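The hard-coded byte strings in these tests double as a description of the .sol header: a 0x00BF marker, a four-byte big-endian length, the 'TCSO' signature, six bytes the tests only vary to trigger padding errors, then a length-prefixed object name and four bytes of padding. A small sketch of unpacking the shared test vector is below; the field names are an interpretation of the bytes, not identifiers from pyamf itself.
import struct
# Illustrative unpacking of the header test vector above (field names are an
# interpretation of the bytes, not identifiers taken from pyamf).
header = b'\x00\xbf\x00\x00\x00\x15TCSO\x00\x04\x00\x00\x00\x00\x00\x05hello\x00\x00\x00\x00'
marker, payload_len = struct.unpack('>HI', header[:6])   # 0x00bf marker, length of the bytes after offset 6
signature = header[6:10]                                  # b'TCSO'
name_len = struct.unpack('>H', header[16:18])[0]          # length of the object name
name = header[18:18 + name_len]                           # b'hello'
print(hex(marker), payload_len, signature, name)          # 0xbf 21 b'TCSO' b'hello'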
|
nvoron23/TextBlob
|
textblob/translate.py
|
Python
|
mit
| 2,924
| 0.00171
|
# -*- coding: utf-8 -*-
"""
Translator module that uses the Google Translate API.
Adapted from Terry Yin's google-translate-python.
Language detection added by Steven Loria.
"""
from __future__ import absolute_import
import json
import re
import codecs
from textblob.compat import PY2, request, urlencode
from textblob.exceptions import TranslatorError
class Translator(object):
"""A language translator and detector.
Usage:
::
>>> from textblob.translate import Translator
>>> t = Translator()
>>> t.translate('hello', from_lang='en', to_lang='fr')
u'bonjour'
>>> t.detect("hola")
u'es'
"""
url = "http://translate.google.com/translate_a/t"
headers = {'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) '
'AppleWebK
|
it/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19')}
def translate(self, source, from_lang=None, to_lang='en', host=None, type_=None):
"""Translate the source text from one
|
language to another."""
if PY2:
source = source.encode('utf-8')
data = {"client": "p", "ie": "UTF-8", "oe": "UTF-8",
"sl": from_lang, "tl": to_lang, "text": source}
json5 = self._get_json5(self.url, host=host, type_=type_, data=data)
return self._get_translation_from_json5(json5)
def detect(self, source, host=None, type_=None):
"""Detect the source text's language."""
if PY2:
source = source.encode('utf-8')
if len(source) < 3:
raise TranslatorError('Must provide a string with at least 3 characters.')
data = {"client": "p", "ie": "UTF-8", "oe": "UTF-8", "text": source}
json5 = self._get_json5(self.url, host=host, type_=type_, data=data)
lang = self._get_language_from_json5(json5)
return lang
def _get_language_from_json5(self, content):
json_data = json.loads(content)
if 'src' in json_data:
return json_data['src']
return None
def _get_translation_from_json5(self, content):
result = u""
json_data = json.loads(content)
if 'sentences' in json_data:
result = ''.join([s['trans'] for s in json_data['sentences']])
return _unescape(result)
def _get_json5(self, url, host=None, type_=None, data=None):
encoded_data = urlencode(data).encode('utf-8')
req = request.Request(url=url, headers=self.headers, data=encoded_data)
if host or type_:
req.set_proxy(host=host, type=type_)
resp = request.urlopen(req)
content = resp.read()
return content.decode('utf-8')
def _unescape(text):
"""Unescape unicode character codes within a string.
"""
pattern = r'\\{1,2}u[0-9a-fA-F]{4}'
decode = lambda x: codecs.getdecoder('unicode_escape')(x.group())[0]
return re.sub(pattern, decode, text)
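The _unescape helper above turns literal backslash-u escape codes embedded in the translation text back into characters through a re.sub callback. A self-contained sketch of that behaviour follows; the sample string is an invented example, not data from the library.
import re
import codecs
# Illustrative only: replicate the _unescape idea on an invented sample string.
pattern = r'\\{1,2}u[0-9a-fA-F]{4}'
decode = lambda match: codecs.getdecoder('unicode_escape')(match.group())[0]
sample = 'caf\\u00e9'                    # holds a literal backslash-u sequence
print(re.sub(pattern, decode, sample))   # prints: café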
|