| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dallingham/regenerate
|
regenerate/extras/regrst.py
|
1
|
20854
|
#
# Manage registers in a hardware design
#
# Copyright (C) 2008 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Produces RestructuredText documentation from the definition of the register.
Docutils is used to convert the output to the desired format. Currently, only
HTML output is supported.
"""
try:
from docutils.core import publish_parts
_HTML = True
except ImportError:
_HTML = False
from cStringIO import StringIO
from regenerate.db import TYPE_TO_SIMPLE_TYPE
import re
from token import full_token, in_groups, uvm_name
CSS = '''
<style type="text/css">
table td{
padding: 3pt;
font-size: 10pt;
}
table th{
padding: 3pt;
font-size: 11pt;
}
table th.field-name{
padding-bottom: 0pt;
padding-left: 5pt;
font-size: 10pt;
}
table td.field-body{
padding-bottom: 0pt;
font-size: 10pt;
}
table{
border-spacing: 0pt;
}
h1{
font-family: Arial,Helvetica,Sans;
font-size: 12pt;
}
h1.title{
font-family: Arial,Helvetica,Sans;
font-size: 14pt;
}
body{
font-size: 10pt;
font-family: Arial,Helvetica,Sans;
}
div.admonition, div.attention, div.caution, div.danger, div.error,
div.hint, div.important, div.note, div.tip, div.warning {
margin: 2em ;
border: medium outset ;
padding: 1em }
div.admonition p.admonition-title, div.hint p.admonition-title,
div.important p.admonition-title, div.note p.admonition-title,
div.tip p.admonition-title {
font-weight: bold ;
font-family: sans-serif }
div.attention p.admonition-title, div.caution p.admonition-title,
div.danger p.admonition-title, div.error p.admonition-title,
div.warning p.admonition-title {
color: red ;
font-weight: bold ;
font-family: sans-serif }
span.overline, span.bar {
text-decoration: overline;
}
.fraction, .fullfraction {
display: inline-block;
vertical-align: middle;
text-align: center;
}
.fraction .fraction {
font-size: 80%;
line-height: 100%;
}
span.numerator {
display: block;
}
span.denominator {
display: block;
padding: 0ex;
border-top: thin solid;
}
sup.numerator, sup.unit {
font-size: 70%;
vertical-align: 80%;
}
sub.denominator, sub.unit {
font-size: 70%;
vertical-align: -20%;
}
span.sqrt {
display: inline-block;
vertical-align: middle;
padding: 0.1ex;
}
sup.root {
font-size: 70%;
position: relative;
left: 1.4ex;
}
span.radical {
display: inline-block;
padding: 0ex;
font-size: 150%;
vertical-align: top;
}
span.root {
display: inline-block;
border-top: thin solid;
padding: 0ex;
vertical-align: middle;
}
span.symbol {
line-height: 125%;
font-size: 125%;
}
span.bigsymbol {
line-height: 150%;
font-size: 150%;
}
span.largesymbol {
font-size: 175%;
}
span.hugesymbol {
font-size: 200%;
}
span.scripts {
display: inline-table;
vertical-align: middle;
}
.script {
display: table-row;
text-align: left;
line-height: 150%;
}
span.limits {
display: inline-table;
vertical-align: middle;
}
.limit {
display: table-row;
line-height: 99%;
}
sup.limit, sub.limit {
line-height: 100%;
}
span.symbolover {
display: inline-block;
text-align: center;
position: relative;
float: right;
right: 100%;
bottom: 0.5em;
width: 0px;
}
span.withsymbol {
display: inline-block;
}
span.symbolunder {
display: inline-block;
text-align: center;
position: relative;
float: right;
right: 80%;
top: 0.3em;
width: 0px;
}
</style>
'''
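# Format a register's address at the given offset; registers whose ram_size is
# greater than 32 are displayed as an address range instead of a single address.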
def reg_addr(register, offset):
base = register.address + offset
if register.ram_size > 32:
return "%08x - %08x" % (base, base + register.ram_size)
else:
return "%08x" % base
def norm_name(text):
if text is not None:
return text.lower().replace(" ", "-").replace("_", "-")
else:
return ""
class RegisterRst:
"""
Produces documentation from a register definition
"""
def __init__(self, register,
regset_name=None,
project=None,
inst=None,
highlight=None,
show_defines=True,
show_uvm=False,
decode=None,
group=None,
maxlines=9999999,
db=None,
max_values=24,
bootstrap=False,
header_level=1):
self._max_values = max_values
self._reg = register
self._highlight = highlight
self._prj = project
self._regset_name = regset_name
self._show_defines = show_defines
self._show_uvm = show_uvm
self._group = group
self._inst = inst
self._maxlines = maxlines
self._bootstrap = bootstrap
self._header_level = header_level
if db is None:
self.reglist = set()
else:
self.reglist = set([reg.register_name for reg in db.get_all_registers()])
if decode:
try:
if isinstance(decode, str) or isinstance(decode, unicode):
decode = int(decode, 16)
elif isinstance(decode, int):
decode = decode
else:
decode = None
except ValueError:
decode = None
self._decode = decode
self._db = db
def html_css(self, text=""):
"""
Returns the definition with the basic, default CSS provided
"""
return CSS + self.html(text)
def text(self, line):
return line.strip()
def restructured_text(self, text=""):
"""
Returns the definition of the register in RestructuredText format
"""
o = StringIO()
self.str_title(o)
self.str_overview(o)
if self._reg.ram_size < 32: # Temporary hack
self._write_bit_fields(o)
if self._show_defines:
self._write_defines(o, True, False)
o.write(text)
return o.getvalue()
def refname(self, reg_name):
return "%s-%s-%s" % (norm_name(self._inst),
norm_name(self._group),
norm_name(reg_name))
def field_ref(self, name):
return "%s-%s-%s-%s" % (norm_name(self._inst),
norm_name(self._group),
norm_name(self._reg.register_name),
norm_name(name))
def str_title(self, o=None):
ret_str = False
if o is None:
o = StringIO()
ret_str = True
rlen = len(self._reg.register_name) + 2
o.write(".. _%s:\n\n" % self.refname(self._reg.register_name))
o.write(self._reg.register_name)
o.write("\n%s\n\n" % ('_' * rlen))
if ret_str:
return o.getvalue()
def str_overview(self, o=None):
ret_str = False
if o is None:
o = StringIO()
ret_str = True
o.write("%s\n\n" %
self._reg.description.encode('ascii', 'replace'))
if ret_str:
return o.getvalue()
def str_bit_fields(self, o=None):
ret_str = False
if o is None:
o = StringIO()
ret_str = True
o.write(".. role:: resetvalue\n\n")
o.write(".. role:: editable\n\n")
o.write(".. role:: mono\n\n")
o.write(".. list-table::\n")
o.write(" :name: bit_table\n")
o.write(" :widths: 8, 10, 7, 25, 50\n")
if self._bootstrap:
o.write(" :class: table table-bordered table-striped table-condensed display\n")
else:
o.write(" :class: bit-table\n")
o.write(" :header-rows: 1\n\n")
o.write(" * - Bits\n")
if self._decode:
o.write(" - Decode\n")
else:
o.write(" - Reset\n")
o.write(" - Type\n")
o.write(" - Name\n")
o.write(" - Description\n")
last_index = self._reg.width - 1
extra_text = []
for field in reversed(self._reg.get_bit_fields()):
if field.msb != last_index:
display_reserved(o, last_index, field.msb + 1)
if field.width == 1:
o.write(" * - %02d\n" % field.lsb)
else:
o.write(" * - %02d:%02d\n" % (field.msb, field.lsb))
if self._decode:
val = (self._decode & mask(field.msb, field.lsb)) >> field.lsb
if val != field.reset_value:
o.write(" - :resetvalue:`0x%x`\n" % val)
else:
o.write(" - 0x%x\n" % val)
else:
o.write(" - 0x%x\n" % field.reset_value)
o.write(" - %s\n" % TYPE_TO_SIMPLE_TYPE[field.field_type])
o.write(" - %s\n" % field.field_name)
descr = field.description.strip()
marked_descr = "\n ".join(descr.split("\n"))
encoded_descr = marked_descr.encode('ascii', 'replace').rstrip()
lines = encoded_descr.split("\n")
if len(lines) > self._maxlines:
o.write(" - See :ref:`Description for %s <%s>`\n" % (field.field_name, self.field_ref(field.field_name)))
extra_text.append((self.field_ref(field.field_name), field.field_name, encoded_descr))
else:
o.write(" - %s\n" % encoded_descr)
if field.values and len(field.values) < self._max_values:
o.write("\n")
for val in sorted(field.values,
key=lambda x: int(int(x[0], 16))):
if val[1] and val[2]:
o.write(" 0x%x : %s\n %s\n\n" %
(int(val[0], 16), val[1], val[2]))
elif val[1]:
o.write(" 0x%x : %s\n %s\n\n" %
(int(val[0], 16), val[1], "*no description available*"))
else:
o.write(" 0x%x : %s\n %s\n\n" %
(int(val[0], 16), "*no token available*", val[2]))
last_index = field.lsb - 1
if last_index >= 0:
display_reserved(o, last_index, 0)
for ref, name, descr in extra_text:
o.write(".. _%s:\n\n" % ref)
title = "Description for %s\n" % name
o.write(title)
o.write("+" * len(title))
o.write("\n\n")
o.write(descr)
o.write("\n\n")
if ret_str:
return o.getvalue()
def _write_bit_fields(self, o):
o.write("Bit fields\n+++++++++++++++++++++++++++\n\n")
self.str_bit_fields(o)
def _write_defines(self, o, use_uvm=True, use_id=True):
o.write("\n\nAddresses\n+++++++++++++++++++++++\n\n")
self.str_defines(o, use_uvm, use_id)
def str_defines(self, o=None, use_uvm=True, use_id=True):
ret_str = False
if o is None:
o = StringIO()
ret_str = True
x_addr_maps = self._prj.get_address_maps()
instances = in_groups(self._regset_name, self._prj)
addr_maps = set([])
for inst in instances:
for x in x_addr_maps:
groups_in_addr_map = self._prj.get_address_map_groups(x.name)
if inst.group in groups_in_addr_map:
addr_maps.add(x)
if len(addr_maps) == 0:
o.write(".. warning::\n")
o.write(" :class: alert alert-warning\n\n")
o.write(" This register has not been mapped into any address space.\n\n")
elif in_groups(self._regset_name, self._prj):
o.write(".. list-table::\n")
o.write(" :header-rows: 1\n")
if len(addr_maps) == 1:
o.write(" :widths: 50, 50\n")
elif len(addr_maps) == 2:
o.write(" :widths: 50, 25, 25\n")
elif len(addr_maps) == 3:
o.write(" :widths: 50, 16, 16, 17\n")
if self._bootstrap:
o.write(" :class: table table-bordered table-striped table-condensed\n\n")
else:
o.write(" :class: summary\n\n")
o.write(" *")
if use_uvm:
o.write(" - Register Name\n")
if use_id:
if use_uvm:
o.write(" ")
o.write(" - ID\n")
for amap in addr_maps:
o.write(" - %s\n" % amap.name)
for inst in in_groups(self._regset_name, self._prj):
if self._group and inst.group != self._group:
continue
if self._inst and inst.inst != self._inst:
continue
for grp_inst in range(0, inst.grpt):
found_addr = True
if inst.repeat == 1 and not inst.array:
if self._reg.dimension <= 1:
self._addr_entry(o, inst, use_uvm, use_id,
addr_maps, grp_inst, -1, -1)
else:
for i in range(self._reg.dimension):
self._addr_entry(o, inst, use_uvm, use_id,
addr_maps, grp_inst, -1, i)
else:
for gi in range(0, inst.repeat):
if self._reg.dimension <= 1:
self._addr_entry(o, inst, use_uvm, use_id,
addr_maps, grp_inst, gi, -1)
else:
for i in range(self._reg.dimension):
self._addr_entry(o, inst, use_uvm, use_id,
addr_maps, grp_inst, gi, i)
o.write("\n\n")
if ret_str:
return o.getvalue()
def _addr_entry(self, o, inst, use_uvm, use_id, addr_maps,
grp_inst, group_index, index):
if inst.grpt == 1:
u_grp_name = inst.group
t_grp_name = inst.group
else:
u_grp_name = "{0}[{1}]".format(inst.group, grp_inst)
t_grp_name = "{0}{1}".format(inst.group, grp_inst)
o.write(" *")
if use_uvm:
name = uvm_name(u_grp_name, self._reg.token, inst.inst, group_index)
if index < 0:
o.write(" - %s\n" % name)
else:
o.write(" - %s[%d]\n" % (name, index))
if use_id:
name = full_token(t_grp_name, self._reg.token,
inst.inst, group_index, inst.format)
if use_uvm:
o.write(" ")
o.write(" - %s\n" % name)
for map_name in addr_maps:
map_base = self._prj.get_address_base(map_name.name)
offset = map_base + inst.offset + inst.base + (
grp_inst * inst.grpt_offset)
if group_index > 0:
offset += group_index * inst.roffset
if index < 0:
o.write(" - ``%s``\n" % reg_addr(self._reg, offset))
else:
o.write(" - ``%s``\n" % reg_addr(self._reg, offset
+ (index * (self._reg.width/8))))
def _display_uvm_entry(self, inst, index, o):
name = full_token(inst.group, self._reg.token, self._regset_name,
index, inst.format)
o.write(" * - %s\n" % name)
name = uvm_name(inst.group, self._reg.token, inst.inst, index)
o.write(" - %s\n" % name)
def _write_uvm(self, o):
"""
Writes the UVM path name(s) for the register as a table
in restructuredText format.
"""
o.write("\n\n")
o.write("UVM names\n")
o.write("---------\n")
o.write(".. list-table::\n")
o.write(" :header-rows: 1\n")
if self._bootstrap:
o.write(" :class: table table-bordered table-striped table-condensed\n\n")
else:
o.write(" :class: summary\n\n")
o.write(" * - ID\n")
o.write(" - UVM name\n")
for inst in in_groups(self._regset_name, self._prj):
if self._group and inst.group != self._group:
continue
if self._inst and inst.inst != self._inst:
continue
if inst.repeat == 1:
self._display_uvm_entry(inst, -1, o)
else:
for i in range(0, inst.repeat):
self._display_uvm_entry(inst, i, o)
o.write("\n\n")
def html_from_text(self, text, links=None):
if text is None:
return "No data"
if _HTML:
refs = []
if links:
for vals in re.findall("`[^`]+`_", text):
v = vals[1:-2]
if v in links:
refs.append(".. _`%s`: %s" % (v, links[v]))
try:
if self._header_level > 1:
overrides = {
'initial_header_level': self._header_level,
'doctitle_xform': False,
'report_level': 'quiet'
}
else:
overrides = {
'report_level': 'quiet'
}
parts = publish_parts(
text + "\n".join(refs),
writer_name="html",
settings_overrides=overrides
)
if self._highlight is None:
return parts['html_title'] + parts['html_subtitle'] + parts['body']
else:
paren_re = re.compile("(%s)" % self._highlight, flags=re.IGNORECASE)
return parts['html_title'] + parts['html_subtitle'] + \
paren_re.sub(r"<mark>\1</mark>", parts['body'])
except TypeError, msg:
return "<h3>Error</h3><p>" + str(msg) + "</p><p>" + text + "</p>"
except AttributeError, msg:
return "<h3>Error</h3><p>" + str(msg) + "</p><p>" + text + "</p>"
except ZeroDivisionError:
return "<h3>Error in Restructured Text</h3>Please contact the developer to get the documentation fixed"
else:
return "<pre>{0}</pre>".format(self.restructured_text())
def html(self, text="", links=None):
"""
Produces a HTML subsection of the document (no header/body).
"""
return self.html_from_text(self.restructured_text(text), links)
def html_bit_fields(self, text="", links=None):
return self.html_from_text(self.str_bit_fields() + "\n" + text, links)
def html_title(self, links=None):
return self.html_from_text(self.str_title(), links)
def html_addresses(self, text="", links=None):
return self.html_from_text(self.str_defines(None, True, False) + "\n" + text, links)
def html_overview(self, text="", links=None):
return self.html_from_text(self.str_overview() + "\n" + text, links)
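# Write a list-table row marking a reserved bit range (a single bit when
# stop == start) with a reset value of 0x0 and type RO.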
def display_reserved(o, stop, start):
if stop == start:
o.write(" * - ``%02d``\n" % stop)
else:
o.write(" * - ``%02d:%02d``\n" % (stop, start))
o.write(' - ``0x0``\n')
o.write(' - RO\n')
o.write(' - \n')
o.write(' - *reserved*\n')
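# Return an integer with bits start..stop (inclusive) set to 1.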
def mask(stop, start):
value = 0
for i in range(start, stop + 1):
value |= (1 << i)
return value
|
gpl-2.0
| 8,270,414,968,194,198,000
| 29.577713
| 125
| 0.493143
| false
| 3.561742
| false
| false
| false
|
pddring/pygame-examples
|
02-different-colours.py
|
1
|
1462
|
# pygame example: github.com/pddring/pygame-examples
"""
This example shows you how to experiment with different colours in pygame.
It will fill the whole pygame window in different colours
Here are some things to try to adapt the code:
TODO: Make the screen appear black
TODO: Make the screen appear blue
TODO: Make the screen appear yellow
"""
# import pygame module
import pygame
# show the pygame window
pygame.init()
screen = pygame.display.set_mode((400,300))
pygame.display.set_caption("Pygame Example")
# fill the screen in white (Red = 255/255, Green = 255/255, Blue = 255/255)
screen.fill((255,255,255))
"""
Pygame doesn't draw directly to the screen. If it did, games would look messy
because you'd see each item being drawn one after the other, rather than
just seeing the whole game screen appear 'instantly'
Think of it like drawing a comic book scene on a piece of A4 paper.
You hold up the paper so everyone else can see the blank side whilst you draw
on the side facing you. When you've finished drawing your picture, you flip
over the paper so everyone can see the finished picture while you draw the next
scene on the other side.
"""
# update the display
pygame.display.flip()
# This stops the pygame window from closing straight away
raw_input("Press enter to go red")
# Fill the screen in red (Red = 255/255, Green = 0/255, Blue = 0/255)
screen.fill((255,0,0))
pygame.display.flip()
raw_input("Press enter to quit")
pygame.quit()
|
unlicense
| -3,988,145,037,399,547,400
| 30.106383
| 79
| 0.75513
| false
| 3.600985
| false
| false
| false
|
Faaux/DingoEngine
|
HelperScripts/parseC.py
|
1
|
8282
|
from pathlib import Path
from multiprocessing import Pool, freeze_support
import os
import datetime
import clang.cindex
import subprocess
from paths import path_to_components, path_to_gameobjects, path_to_cmake, path_to_src
args = ["-xc++",
"-D__CODE_GENERATOR__",
"-std=c++17",
"-IC:/Projects/DingoEngine/src",
"-IC:/Projects/DingoEngine/src/misc",
"-IC:/Projects/DingoEngine/src/graphics",
"-IC:/Projects/DingoEngine/src/components",
"-IC:/Projects/DingoEngine/ThirdParty/SDL-mirror/include",
"-IC:/Projects/DingoEngine/ThirdParty/glm",
"-IC:/Projects/DingoEngine/ThirdParty/glad/include",
"-IC:/Projects/DingoEngine/ThirdParty/imgui",
"-IC:/Projects/DingoEngine/ThirdParty/imguiGizmo",
"-IC:/Projects/DingoEngine/ThirdParty/tinyGltf",
"-IC:/Projects/DingoEngine/ThirdParty/freetype-2.9",
"-IC:/Projects/DingoEngine/ThirdParty/stb",
"-IC:/Projects/DingoEngine/ThirdParty/physx-3.4/Include",
"-Ic:/Program Files/LLVM/include"]
class Field:
def __init__(self, cursor):
self.name = cursor.spelling
self.attributes = []
self.type = cursor.type.spelling
for c in cursor.get_children():
if c.kind == clang.cindex.CursorKind.ANNOTATE_ATTR:
self.attributes.append(c.spelling or c.displayname)
def __str__(self) -> str:
result = "{}({})".format(self.name, self.type)
if self.attributes:
result += " -> {}".format(" ,".join(self.attributes))
return result
class Class:
def __init__(self, cursor):
self.cursor = cursor
self.name = cursor.spelling or cursor.displayname
self.isTypeBase = False
self.fields = []
self.attributes = []
self.base = []
for c in cursor.get_children():
if c.kind == clang.cindex.CursorKind.FIELD_DECL:
f = Field(c)
self.fields.append(f)
elif c.kind == clang.cindex.CursorKind.ANNOTATE_ATTR:
self.attributes.append(c.spelling or c.displayname)
elif c.kind == clang.cindex.CursorKind.CXX_BASE_SPECIFIER:
self.base.append(c.type.spelling)
assert (len(self.base) <= 1)
def __str__(self) -> str:
result = "{}".format(self.name)
if self.attributes:
result += " -> {}".format(" ,".join(self.attributes))
return result
class File:
def __init__(self, filename):
self.classes = []
self.candidateClasses = {}
self.filename: Path = filename
self.output_filename_h: Path = filename.name.replace(".h", ".generated.h")
self.output_filename_cpp: Path = filename.name.replace(".h", ".generated.cpp")
def needs_update(self):
path = self.filename.parent / "generated"
path_gen_header = (path / self.output_filename_h)
path_gen_cpp = (path / self.output_filename_cpp)
if path_gen_header.is_file() and path_gen_cpp.is_file():
modify_generated_h = path_gen_header.lstat().st_mtime
modify_generated_cpp = path_gen_cpp.lstat().st_mtime
modify_header = self.filename.lstat().st_mtime
if modify_header < modify_generated_h and modify_header < modify_generated_cpp:
return False
return True
def generate(self):
# Output to file
index = clang.cindex.Index.create()
translation_unit = index.parse(str(self.filename), args)
self.build_classes(translation_unit.cursor)
candidates = [c for c in self.candidateClasses.values() if
self.filename.samefile(Path(c.cursor.location.file.name)) and len(c.base) == 1]
for candidate in candidates:
base = candidate
while len(base.base) == 1:
key = base.base[0]
if key not in self.candidateClasses.keys():
break
base = self.candidateClasses[key]
if base.cursor.type.spelling == "DG::TypeBase":
self.classes.append(candidate)
path = self.filename.parent / "generated"
if len(self.classes) > 0:
self.print()
# Make sure folder exists
if not os.path.exists(str(path)):
os.makedirs(str(path))
with open(str(path / self.output_filename_h), "w") as file:
output_file(file, False, self.output_filename_h, self)
with open(str(path / self.output_filename_cpp), "w") as file:
output_file(file, True, self.output_filename_cpp, self)
# format outputted files
arguments = [r"c:\Program Files\LLVM\bin\clang-format.exe", "-i", "-style=file",
str(path / self.output_filename_h), str(path / self.output_filename_cpp)]
subprocess.Popen(arguments)
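# Recursively walk the AST: descend into namespaces and record every class or
# struct declared under the project source tree as a candidate class.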
def build_classes(self, cursor):
for c in cursor.get_children():
if c.kind == clang.cindex.CursorKind.NAMESPACE:
self.build_classes(c)
if c.location.file and path_to_src in Path(c.location.file.name).parents:
if c.kind == clang.cindex.CursorKind.CLASS_DECL or c.kind == clang.cindex.CursorKind.STRUCT_DECL:
a_class = Class(c)
self.candidateClasses[c.type.spelling] = a_class
def print(self):
print(self.filename)
indent_size = 4
for c in self.classes:
indent = " " * indent_size
print(indent + "+-- " + c.name)
indent = indent + "| "
for f in c.fields:
print(indent + "+-- " + f.name)
def __str__(self) -> str:
return "{}(File)".format(str(self.filename))
def output_file(file, with_impl, filename, ast_file):
file.write("/**\n"
"* @file {}\n"
"* @author Generated by DingoGenerator (written by Faaux)\n"
"* @date {}\n"
"* This file was generated, do not edit!"
"*/\n"
"\n".format(filename, datetime.datetime.now().strftime("%d %B %Y")))
file.write(
"#pragma once\n"
)
if with_impl:
file.write('#include "engine/Serialize.h"\n')
file.write('#include "{}"\n'.format(filename.replace(".cpp", ".h")))
file.write(
'#include "../{}"\n'
"\n".format(ast_file.filename.name)
)
file.write(
"namespace DG\n"
"{\n"
)
# Output all functions here
for c in ast_file.classes:
parsed_class: Class = c
file.write(
'void Serialize{}(const {}* item, nlohmann::json& json)'.format(parsed_class.name,
parsed_class.name))
if with_impl:
file.write(
'\n'
'{\n'
)
# Find all attributes in files and export them here!
for f in parsed_class.fields:
field: Field = f
if field.attributes:
if field.attributes[0] == "DPROPERTY":
file.write(
' json["{}"] = Serialize(item->{});\n'.format(field.name, field.name)
)
file.write(
'}\n'
)
else:
file.write(";\n")
file.write(
"} // namespace DG\n"
)
def generate_for_file(file):
file.generate()
def main():
freeze_support()
pathlist = list(Path(path_to_components).glob('**/*.h'))
pathlist = pathlist + list(Path(path_to_gameobjects).glob('**/*.h'))
filelist = [File(p) for p in pathlist
if not str(p).endswith(".generated.h")
and not str(p).endswith("Actor.h")
and not str(p).endswith("BaseComponent.h")]
filelist = [f for f in filelist if f.needs_update()]
if len(filelist) > 0:
if len(filelist) >= 4:
p = Pool(4)
p.map(generate_for_file, filelist)
else:
for file in filelist:
file.generate()
path_to_cmake.touch()
if __name__ == "__main__":
main()
|
mit
| -4,985,849,433,667,174,000
| 33.65272
| 113
| 0.545037
| false
| 3.786923
| false
| false
| false
|
MiroK/DolfinSurface
|
demo/undocumented/meshfunction/python/demo_meshfunction.py
|
1
|
1231
|
"""This demo illustrates use of the MeshFunction class.
Original implementation: ../cpp/main.cpp by Ola Skavhaug."""
# Copyright (C) 2007 Kristian B. Oelgaard
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2007-11-15
# Last changed: 2008-03-31
from dolfin import *
# Read mesh from file
mesh = Mesh("../unitsquare_2_2.xml.gz")
# Read mesh function from file
file_in = File("../unitsquare_2_2_subdomains.xml.gz")
f = MeshFunction("double", mesh)
file_in >> f
# Write mesh function to file
out = File("meshfunction_out.xml.gz")
out << f
# Plot mesh function
plot(f, interactive=True)
|
gpl-3.0
| -5,013,314,975,686,665,000
| 29.775
| 77
| 0.735987
| false
| 3.428969
| false
| false
| false
|
google-research/remixmatch
|
pseudo_label.py
|
1
|
5410
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pseudo-label: The simple and efficient semi-supervised learning method fordeep neural networks.
Reimplementation of http://deeplearning.net/wp-content/uploads/2013/03/pseudo_label_final.pdf
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import utils, data, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class PseudoLabel(models.MultiModel):
def model(self, batch, lr, wd, ema, warmup_pos, consistency_weight, threshold, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
l = tf.one_hot(l_in, self.nclass)
wd *= lr
warmup = tf.clip_by_value(tf.to_float(self.step) / (warmup_pos * (FLAGS.train_kimg << 10)), 0, 1)
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
logits_x = classifier(xt_in, training=True)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Take only first call to update batch norm.
logits_y = classifier(y_in, training=True)
# Get the pseudo-label loss
loss_pl = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.argmax(logits_y, axis=-1), logits=logits_y
)
# Masks denoting which data points have high-confidence predictions
greater_than_thresh = tf.reduce_any(
tf.greater(tf.nn.softmax(logits_y), threshold),
axis=-1,
keepdims=True,
)
greater_than_thresh = tf.cast(greater_than_thresh, loss_pl.dtype)
# Only enforce the loss when the model is confident
loss_pl *= greater_than_thresh
# Note that we also average over examples without confident outputs;
# this is consistent with the realistic evaluation codebase
loss_pl = tf.reduce_mean(loss_pl)
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=l, logits=logits_x)
loss = tf.reduce_mean(loss)
tf.summary.scalar('losses/xe', loss)
tf.summary.scalar('losses/pl', loss_pl)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss + loss_pl * warmup * consistency_weight,
colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = PseudoLabel(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
warmup_pos=FLAGS.warmup_pos,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
smoothing=FLAGS.smoothing,
consistency_weight=FLAGS.consistency_weight,
threshold=FLAGS.threshold,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('consistency_weight', 1., 'Consistency weight.')
flags.DEFINE_float('threshold', 0.95, 'Pseudo-label threshold.')
flags.DEFINE_float('warmup_pos', 0.4, 'Relative position at which constraint loss warmup ends.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('smoothing', 0.1, 'Label smoothing.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
|
apache-2.0
| 1,373,210,983,404,664,600
| 41.598425
| 113
| 0.65268
| false
| 3.461292
| false
| false
| false
|
nik-hil/scripts
|
palindrome.py
|
1
|
1731
|
'''This program finds the longest palindrome in a string by taking each character as
the center of a palindrome. From the center it probes in both directions to see how
far the palindrome extends.
A palindrome might also be centered in the space between two characters, e.g. "bb".
'''
def palindrome(string):
'''test cases
>>> palindrome("")
>>> palindrome("a")
'a'
>>> palindrome("ab")
'a'
>>> palindrome("bb")
'bb'
>>> palindrome("abcba")
'abcba'
>>> palindrome("efabcbad")
'abcba'
'''
if not string:
return None
max_range = ()
max_length = 0
for i in range(len(string)):
current_range = find_palindrome(i, string)
new_length = current_range[1] - current_range[0] + 1
if max_length < new_length :
max_length = new_length
max_range = current_range
return string[max_range[0]:max_range[1] + 1]
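# Return the (start, end) index range of the longer of: the longest odd-length
# palindrome centred at i, and the longest even-length palindrome centred
# between i and i+1.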
def find_palindrome(i, string):
len_str = len(string)
len_first = 0
len_second = 0
low, high = find_palindrome_range(i, i, string, len_str)
if low == i and high == i:
len_first = (low, high)
else:
len_first = (low + 1, high - 1)
low, high = find_palindrome_range(i, i+1, string, len_str)
if low == i and high == i + 1:
len_second = (low, high - 1)
else:
len_second = (low + 1, high - 1)
if len_first[1] - len_first[0] > len_second[1] - len_second[0]:
return len_first
else:
return len_second
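# Expand outwards from the given centre while the characters at low and high
# match; returns the first (low, high) pair for which the match fails.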
def find_palindrome_range(low, high, string, len_str):
while (low > -1 and high < len_str and string[low] == string[high]):
low -= 1
high += 1
return low, high
if __name__ == "__main__":
import doctest
doctest.testmod()
|
mit
| -8,828,558,457,101,507,000
| 26.919355
| 79
| 0.569613
| false
| 3.17033
| false
| false
| false
|
milapour/palm
|
palm/blink_state_enumerator.py
|
1
|
8844
|
from types import IntType
import numpy
from palm.util import multichoose
from palm.state_collection import StateCollectionFactory
class SingleDarkState(object):
"""
A macrostate for a BlinkModel with one dark microstate.
The available microstates are `I`, `A`, `D`, and `B`.
Attributes
----------
initial_state_flag : bool
This flag is used by BlinkModel when creating an initial
probability vector. Expected to be true only for the
macrostate in which `I` is the only microstate with nonzero
population.
Parameters
----------
id_str : string
A label that is used to identify this macrostate.
I,A,D,B : int
The populations of the respective microstates.
observation_class : string
The aggregated class to which this macrostate belongs.
"""
def __init__(self, id_str, I, A, D, B, observation_class):
self.id = id_str
self.I = I
self.A = A
self.D = D
self.B = B
self.observation_class = observation_class
self.initial_state_flag = False
def __str__(self):
return "%s %s" % (self.id, self.observation_class)
def as_array(self):
return numpy.array([self.I, self.A, self.D, self.B])
def get_id(self):
return self.id
def get_class(self):
return self.observation_class
def is_initial_state(self):
return self.initial_state_flag
def set_initial_state_flag(self):
self.initial_state_flag = True
def as_dict(self):
return {'observation_class':self.get_class(),
'I':self.I, 'A':self.A, 'D':self.D, 'B':self.B}
class DoubleDarkState(object):
"""
A macrostate for a BlinkModel with two dark microstates.
The available microstates are `I`, `A`, `D1`, `D2`, and `B`.
Attributes
----------
initial_state_flag : bool
This flag is used by BlinkModel when creating an initial
probability vector. Expected to be true only for the
macrostate in which `I` is the only microstate with nonzero
population.
Parameters
----------
id_str : string
A label that is used to identify this macrostate.
I,A,D1,D2,B : int
The populations of the respective microstates.
observation_class : string
The aggregated class to which this macrostate belongs.
"""
def __init__(self, id_str, I, A, D1, D2, B, observation_class):
self.id = id_str
self.I = I
self.A = A
self.D1 = D1
self.D2 = D2
self.B = B
self.observation_class = observation_class
self.initial_state_flag = False
def __str__(self):
return "%s %s" % (self.id, self.observation_class)
def as_array(self):
return numpy.array([self.I, self.A, self.D1, self.D2, self.B])
def get_id(self):
return self.id
def get_class(self):
return self.observation_class
def is_initial_state(self):
return self.initial_state_flag
def set_initial_state_flag(self):
self.initial_state_flag = True
def as_dict(self):
return {'observation_class':self.get_class(),
'I':self.I, 'A':self.A, 'D1':self.D1, 'D2':self.D2, 'B':self.B}
class SingleDarkStateEnumeratorFactory(object):
"""
Creates a state enumerator for a BlinkModel with one dark state.
Attributes
----------
num_microstates : int
Parameters
----------
N : int
The total number of fluorophores.
state_factory : class
Factory class for State objects.
max_A : int
Number of fluorophores that can be simultaneously active.
"""
def __init__(self, N, state_factory=SingleDarkState, max_A=5):
assert type(N) is IntType
self.N = N
self.state_factory = state_factory
self.max_A = max_A
self.num_microstates = len(['I', 'A', 'D', 'B'])
def create_state_enumerator(self):
"""
Creates a method that builds a StateCollection, made up of
all possible macrostates in the model, subject to the
constraint that no states with `A` > `max_A` are allowed.
Returns
-------
enumerate_states : callable f()
A method that builds a StateCollection.
"""
def enumerate_states():
"""
Builds a StateCollection for a model with one dark state.
No states with `A` > `max_A` are allowed.
Returns
-------
state_collection : StateCollection
The allowed macrostates for the model.
initial_state_id, final_state_id : string
The identifier strings for the states where a time trace
is expected to start and finish, respectively.
"""
sc_factory = StateCollectionFactory()
for this_count_list in multichoose(self.num_microstates, self.N):
I = this_count_list[0]
A = this_count_list[1]
D = this_count_list[2]
B = this_count_list[3]
if A > self.max_A:
continue
else:
if A > 0:
obs_class = 'bright'
else:
obs_class = 'dark'
id_str = "%d_%d_%d_%d" % (I, A, D, B)
this_state = self.state_factory(id_str, I, A, D, B,
obs_class)
if I == self.N:
initial_state_id = this_state.get_id()
elif B == self.N:
final_state_id = this_state.get_id()
else:
pass
sc_factory.add_state(this_state)
state_collection = sc_factory.make_state_collection()
return state_collection, initial_state_id, final_state_id
return enumerate_states
class DoubleDarkStateEnumeratorFactory(object):
"""
Creates a state enumerator for a BlinkModel with two dark states.
Attributes
----------
num_microstates : int
Parameters
----------
N : int
The total number of fluorophores.
state_factory : class
Factory class for State objects.
max_A : int
Maximum number of fluorophores that can be simultaneously active.
"""
def __init__(self, N, state_factory=DoubleDarkState, max_A=5):
assert type(N) is IntType
self.N = N
self.state_factory = state_factory
self.max_A = max_A
self.num_microstates = len(['I', 'A', 'D1', 'D2', 'B'])
def create_state_enumerator(self):
"""
Creates a method that builds a StateCollection, made up of
all possible macrostates in the model, subject to the
constraint that no states with `A` > `max_A` are allowed.
Returns
-------
enumerate_states : callable f()
A method that builds a StateCollection.
"""
def enumerate_states():
"""
Builds a StateCollection for a model with two dark states.
No states with `A` > `max_A` are allowed.
Returns
-------
state_collection : StateCollection
The allowed macrostates for the model.
initial_state_id, final_state_id : string
The identifier strings for the states where a time trace
is expected to start and finish, respectively.
"""
sc_factory = StateCollectionFactory()
for this_count_list in multichoose(self.num_microstates, self.N):
I = this_count_list[0]
A = this_count_list[1]
D1 = this_count_list[2]
D2 = this_count_list[3]
B = this_count_list[4]
if A > self.max_A:
continue
else:
if A > 0:
obs_class = 'bright'
else:
obs_class = 'dark'
id_str = "%d_%d_%d_%d_%d" % (I, A, D1, D2, B)
this_state = self.state_factory(id_str, I, A, D1, D2, B,
obs_class)
if I == self.N:
initial_state_id = this_state.get_id()
elif B == self.N:
final_state_id = this_state.get_id()
else:
pass
sc_factory.add_state(this_state)
state_collection = sc_factory.make_state_collection()
return state_collection, initial_state_id, final_state_id
return enumerate_states
|
bsd-2-clause
| 1,250,265,342,732,406,800
| 33.956522
| 79
| 0.537766
| false
| 4.088766
| false
| false
| false
|
walles/px
|
px/px_load.py
|
1
|
3820
|
"""
Functions for visualizing system load over time in a Unicode graph.
The one you probably want to call is get_load_string().
"""
import os
from . import px_cpuinfo
from . import px_terminal
import sys
if sys.version_info.major >= 3:
# For mypy PEP-484 static typing validation
from six import text_type # NOQA
from typing import Tuple # NOQA
physical, logical = px_cpuinfo.get_core_count()
physical_string = px_terminal.bold(str(physical) + " cores")
cores_string = "[{} | {} virtual]".format(physical_string, logical)
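# Map a load average to an integer level from 0 to 3, scaled against the given
# peak value.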
def average_to_level(average, peak):
level = 3 * (average / peak)
return int(round(level))
def averages_to_levels(avg0, avg1, avg2):
"""
Converts three load averages into three levels.
A level is a 0-3 integer value.
This function returns the three levels, plus the peak value the levels are
based on.
"""
peak = max(avg0, avg1, avg2)
if peak < 1.0:
peak = 1.0
l0 = average_to_level(avg0, peak)
l1 = average_to_level(avg1, peak)
l2 = average_to_level(avg2, peak)
return (l0, l1, l2, peak)
def levels_to_graph(levels):
"""
Convert an array of levels into a unicode string graph.
Each level in the levels array is an integer 0-3. Those levels will be
represented in the graph by 1-4 dots each.
The returned string will contain two levels per rune.
"""
if len(levels) % 2 == 1:
# Left pad uneven-length arrays with an empty column
levels = [-1] + levels
# From: http://stackoverflow.com/a/19177754/473672
unicodify = chr
try:
# Python 2
unicodify = unichr # type: ignore
except NameError:
# Python 3
pass
# https://en.wikipedia.org/wiki/Braille_Patterns#Identifying.2C_naming_and_ordering
LEFT_BAR = [0x00, 0x40, 0x44, 0x46, 0x47]
RIGHT_BAR = [0x00, 0x80, 0xA0, 0xB0, 0xB8]
graph = ""
for index in range(0, len(levels) - 1, 2):
left_level = levels[index] + 1
right_level = levels[index + 1] + 1
code = 0x2800 + LEFT_BAR[left_level] + RIGHT_BAR[right_level]
graph += unicodify(code)
return graph
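# A note on the interval formulas inside get_load_values() below (a rough
# derivation, assuming the load was roughly constant within each interval):
# os.getloadavg() returns averages over the last 1, 5 and 15 minutes, so
#     avg5  ~ (1*avg1 + 4*avg1to5)  / 5    =>  avg1to5  = (5*avg5 - avg1) / 4
#     avg15 ~ (5*avg5 + 10*avg5to15) / 15  =>  avg5to15 = (15*avg15 - 5*avg5) / 10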
def get_load_values():
# type: () -> Tuple[float, float, float]
"""
Returns three system load numbers:
* The first is the average system load over the last 0m-1m
* The second is the average system load over the last 1m-5m
* The third is the average system load over the last 5m-15m
"""
avg1, avg5, avg15 = os.getloadavg()
avg0to1 = avg1
avg1to5 = (5 * avg5 - avg1) / 4.0
avg5to15 = (15 * avg15 - 5 * avg5) / 10.0
return (avg0to1, avg1to5, avg5to15)
def get_load_string(load_values=None):
# type: (Tuple[float, float, float]) -> text_type
"""
Example return string, underlines indicate bold:
"1.5 [4 cores | 8 virtual] [15m load history: GRAPH]"
^^^ ^^^^^^^ ^^^^^
Load number is color coded:
* <= physical core count: Green
* <= virtual core count: Yellow
* Larger: Red
"""
if load_values is None:
load_values = get_load_values()
avg0to1, avg1to5, avg5to15 = load_values
load_string = u"{:.1f}".format(avg0to1)
if avg0to1 <= physical:
load_string = px_terminal.green(load_string)
elif avg0to1 <= logical:
load_string = px_terminal.yellow(load_string)
else:
load_string = px_terminal.red(load_string)
recent, between, old, peak = averages_to_levels(avg0to1, avg1to5, avg5to15)
graph = levels_to_graph([old] * 10 + [between] * 4 + [recent])
# Increase intensity for more recent times
graph = px_terminal.faint(graph[0:3]) + graph[3:6] + px_terminal.bold(graph[6:])
return u"{} {} [15m load history: {}]".format(load_string, cores_string, graph)
|
mit
| 1,131,774,955,498,635,500
| 27.939394
| 87
| 0.624607
| false
| 3.234547
| false
| false
| false
|
ajylee/gpaw-rtxs
|
gpaw/response/bse.py
|
1
|
25096
|
from time import time, ctime
import numpy as np
import pickle
from math import pi
from ase.units import Hartree
from ase.io import write
from gpaw.io.tar import Writer, Reader
from gpaw.mpi import world, size, rank, serial_comm
from gpaw.utilities.blas import gemmdot, gemm, gemv
from gpaw.utilities import devnull
from gpaw.utilities.memory import maxrss
from gpaw.response.base import BASECHI
from gpaw.response.parallel import parallel_partition
from gpaw.response.df import DF
class BSE(BASECHI):
"""This class defines Belth-Selpether equations."""
def __init__(self,
calc=None,
nbands=None,
nc=None,
nv=None,
w=None,
q=None,
eshift=None,
ecut=10.,
eta=0.2,
rpad=np.array([1,1,1]),
vcut=None,
ftol=1e-5,
txt=None,
optical_limit=False,
positive_w=False, # True : use Tamm-Dancoff Approx
use_W=True, # True: include screened interaction kernel
qsymm=True):
BASECHI.__init__(self, calc=calc, nbands=nbands, w=w, q=q,
eshift=eshift, ecut=ecut, eta=eta, rpad=rpad,
ftol=ftol, txt=txt, optical_limit=optical_limit)
self.epsilon_w = None
self.positive_w = positive_w
self.vcut = vcut
self.nc = nc # conduction band index
self.nv = nv # valence band index
self.use_W = use_W
self.qsymm = qsymm
def initialize(self):
self.printtxt('')
self.printtxt('-----------------------------------------------')
self.printtxt('Bethe Salpeter Equation calculation started at:')
self.printtxt(ctime())
BASECHI.initialize(self)
calc = self.calc
self.kd = kd = calc.wfs.kd
# frequency points init
self.dw = self.w_w[1] - self.w_w[0]
assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all() # make sure it's a linear w grid
assert self.w_w.max() == self.w_w[-1]
self.dw /= Hartree
self.w_w /= Hartree
self.wmax = self.w_w[-1]
self.Nw = int(self.wmax / self.dw) + 1
# band init
if self.nc is None and self.positive_w is True: # applied only to semiconductor
nv = self.nvalence / 2 - 1
self.nv = np.array([nv, nv+1]) # valence band start / end
self.nc = np.array([nv+1, nv+2]) # conduction band start / end
self.printtxt('Number of electrons: %d' %(self.nvalence))
self.printtxt('Valence band included : (band %d to band %d)' %(self.nv[0],self.nv[1]-1))
self.printtxt('Conduction band included : (band %d to band %d)' %(self.nc[0],self.nc[1]-1))
elif self.nc == 'all' or self.positive_w is False: # applied to metals
self.nv = np.array([0, self.nbands])
self.nc = np.array([0, self.nbands])
self.printtxt('All the bands are included')
else:
self.printtxt('User defined bands for BSE.')
self.printtxt('Valence band included: (band %d to band %d)' %(self.nv[0],self.nv[1]-1))
self.printtxt('Conduction band included: (band %d to band %d)' %(self.nc[0],self.nc[1]-1))
# find the pair index and initialize pair energy (e_i - e_j) and occupation (f_i - f_j)
self.e_S = {}
focc_s = {}
self.Sindex_S3 = {}
iS = 0
kq_k = self.kq_k
for k1 in range(self.nkpt):
ibzkpt1 = kd.bz2ibz_k[k1]
ibzkpt2 = kd.bz2ibz_k[kq_k[k1]]
for n1 in range(self.nv[0], self.nv[1]):
for m1 in range(self.nc[0], self.nc[1]):
focc = self.f_kn[ibzkpt1,n1] - self.f_kn[ibzkpt2,m1]
if not self.positive_w: # Dont use Tamm-Dancoff Approx.
check_ftol = np.abs(focc) > self.ftol
else:
check_ftol = focc > self.ftol
if check_ftol:
self.e_S[iS] =self.e_kn[ibzkpt2,m1] - self.e_kn[ibzkpt1,n1]
focc_s[iS] = focc
self.Sindex_S3[iS] = (k1, n1, m1)
iS += 1
self.nS = iS
self.focc_S = np.zeros(self.nS)
for iS in range(self.nS):
self.focc_S[iS] = focc_s[iS]
if self.use_W:
# q points init
self.bzq_qc = kd.get_bz_q_points()
if not self.qsymm:
self.ibzq_qc = self.bzq_qc
else:
# if q symmetry is used, the k-point and q-point grids should be the same
(self.ibzq_qc, self.ibzq_q, self.iop_q,
self.timerev_q, self.diff_qc) = kd.get_ibz_q_points(self.bzq_qc,
calc.wfs.symmetry.op_scc)
if np.abs(self.bzq_qc - self.bzk_kc).sum() < 1e-8:
assert np.abs(self.ibzq_qc - kd.ibzk_kc).sum() < 1e-8
self.nibzq = len(self.ibzq_qc)
# parallel init
self.Scomm = world
# kcomm and wScomm are only used when wavefunctions are distributed in parallel.
self.kcomm = world
self.wScomm = serial_comm
self.nS, self.nS_local, self.nS_start, self.nS_end = parallel_partition(
self.nS, world.rank, world.size, reshape=False)
self.print_bse()
if calc.input_parameters['mode'] == 'lcao':
calc.initialize_positions()
# Coulomb kernel init
self.kc_G = np.zeros(self.npw)
for iG in range(self.npw):
index = self.Gindex_G[iG]
qG = np.dot(self.q_c + self.Gvec_Gc[iG], self.bcell_cv)
self.kc_G[iG] = 1. / np.inner(qG, qG)
if self.optical_limit:
self.kc_G[0] = 0.
self.printtxt('')
return
def calculate(self):
calc = self.calc
f_kn = self.f_kn
e_kn = self.e_kn
ibzk_kc = self.ibzk_kc
bzk_kc = self.bzk_kc
kq_k = self.kq_k
focc_S = self.focc_S
e_S = self.e_S
op_scc = calc.wfs.symmetry.op_scc
if self.use_W:
bzq_qc=self.bzq_qc
ibzq_qc = self.ibzq_qc
if type(self.use_W) is str:
# read
data = pickle.load(open(self.use_W))
W_qGG = data['W_qGG']
self.dfinvG0_G = data['dfinvG0_G']
self.printtxt('Finished reading screening interaction kernel')
elif type(self.use_W) is bool:
# calculate from scratch
self.printtxt('Calculating screening interaction kernel.')
W_qGG = self.full_static_screened_interaction()
else:
raise ValueError('use_W can only be string or bool ')
# calculate phi_qaGp
import os.path
if not os.path.isfile('phi_qaGp'):
self.printtxt('Calculating phi_qaGp')
self.get_phi_qaGp()
world.barrier()
self.reader = Reader('phi_qaGp')
self.printtxt('Finished reading phi_aGp !')
self.printtxt('Memory used %f M' %(maxrss() / 1024.**2))
else:
self.phi_aGp = self.get_phi_aGp()
# calculate kernel
K_SS = np.zeros((self.nS, self.nS), dtype=complex)
W_SS = np.zeros_like(K_SS)
self.rhoG0_S = np.zeros((self.nS), dtype=complex)
t0 = time()
self.printtxt('Calculating BSE matrix elements.')
noGmap = 0
for iS in range(self.nS_start, self.nS_end):
k1, n1, m1 = self.Sindex_S3[iS]
rho1_G = self.density_matrix(n1,m1,k1)
self.rhoG0_S[iS] = rho1_G[0]
for jS in range(self.nS):
k2, n2, m2 = self.Sindex_S3[jS]
rho2_G = self.density_matrix(n2,m2,k2)
K_SS[iS, jS] = np.sum(rho1_G.conj() * rho2_G * self.kc_G)
if self.use_W:
rho3_G = self.density_matrix(n1,n2,k1,k2)
rho4_G = self.density_matrix(m1,m2,self.kq_k[k1],self.kq_k[k2])
q_c = bzk_kc[k2] - bzk_kc[k1]
q_c[np.where(q_c > 0.501)] -= 1.
q_c[np.where(q_c < -0.499)] += 1.
if not self.qsymm:
ibzq = self.kd.where_is_q(q_c, self.bzq_qc)
W_GG = W_qGG[ibzq].copy()
else:
iq = self.kd.where_is_q(q_c, self.bzq_qc)
ibzq = self.ibzq_q[iq]
iop = self.iop_q[iq]
timerev = self.timerev_q[iq]
diff_c = self.diff_qc[iq]
invop = np.linalg.inv(op_scc[iop])
W_GG_tmp = W_qGG[ibzq]
Gindex = np.zeros(self.npw,dtype=int)
for iG in range(self.npw):
G_c = self.Gvec_Gc[iG]
if timerev:
RotG_c = -np.int8(np.dot(invop, G_c+diff_c).round())
else:
RotG_c = np.int8(np.dot(invop, G_c+diff_c).round())
tmp_G = np.abs(self.Gvec_Gc - RotG_c).sum(axis=1)
try:
Gindex[iG] = np.where(tmp_G < 1e-5)[0][0]
except:
noGmap += 1
Gindex[iG] = -1
W_GG = np.zeros_like(W_GG_tmp)
for iG in range(self.npw):
for jG in range(self.npw):
if Gindex[iG] == -1 or Gindex[jG] == -1:
W_GG[iG, jG] = 0
else:
W_GG[iG, jG] = W_GG_tmp[Gindex[iG], Gindex[jG]]
if k1 == k2:
if (n1==n2) or (m1==m2):
tmp_G = np.zeros(self.npw, dtype=complex)
q = np.array([0.0001,0,0])
for jG in range(1, self.npw):
qG = np.dot(q+self.Gvec_Gc[jG], self.bcell_cv)
tmp_G[jG] = self.dfinvG0_G[jG] / np.sqrt(np.inner(qG,qG))
const = 1./pi*self.vol*(6*pi**2/self.vol/self.nkpt)**(2./3.)
tmp_G *= const
W_GG[:,0] = tmp_G
W_GG[0,:] = tmp_G.conj()
W_GG[0,0] = 2./pi*(6*pi**2/self.vol/self.nkpt)**(1./3.) \
* self.dfinvG0_G[0] *self.vol
tmp_GG = np.outer(rho3_G.conj(), rho4_G) * W_GG
W_SS[iS, jS] = np.sum(tmp_GG)
# self.printtxt('%d %d %s %s' %(iS, jS, K_SS[iS,jS], W_SS[iS,jS]))
self.timing(iS, t0, self.nS_local, 'pair orbital')
K_SS *= 4 * pi / self.vol
if self.use_W:
K_SS -= 0.5 * W_SS / self.vol
world.sum(K_SS)
world.sum(self.rhoG0_S)
self.printtxt('The number of G index outside the Gvec_Gc: %d'%(noGmap))
# get and solve hamiltonian
H_SS = np.zeros_like(K_SS)
for iS in range(self.nS):
H_SS[iS,iS] = e_S[iS]
for jS in range(self.nS):
H_SS[iS,jS] += focc_S[iS] * K_SS[iS,jS]
if self.positive_w is True: # matrix should be Hermitian
for iS in range(self.nS):
for jS in range(self.nS):
if np.abs(H_SS[iS,jS]- H_SS[jS,iS].conj()) > 1e-4:
print iS, jS, H_SS[iS,jS]- H_SS[jS,iS].conj()
# assert np.abs(H_SS[iS,jS]- H_SS[jS,iS].conj()) < 1e-4
# make the matrix hermitian
if self.use_W:
H_SS = (np.real(H_SS) + np.real(H_SS.T)) / 2. + 1j * (np.imag(H_SS) - np.imag(H_SS.T)) /2.
# if not self.positive_w:
self.w_S, self.v_SS = np.linalg.eig(H_SS)
# else:
# from gpaw.utilities.lapack import diagonalize
# self.w_S = np.zeros(self.nS, dtype=complex)
# diagonalize(H_SS, self.w_S)
# self.v_SS = H_SS.T.copy() # eigenvectors in the rows
data = {
'w_S': self.w_S,
'v_SS':self.v_SS,
'rhoG0_S':self.rhoG0_S
}
if rank == 0:
pickle.dump(data, open('H_SS.pckl', 'w'), -1)
return
def full_static_screened_interaction(self):
"""Calcuate W_GG(q)"""
W_qGG = np.zeros((self.nibzq, self.npw, self.npw),dtype=complex)
t0 = time()
for iq in range(self.nibzq):#self.q_start, self.q_end):
W_qGG[iq] = self.screened_interaction_kernel(iq, static=True)
self.timing(iq, t0, self.nibzq, 'iq')
data = {'W_qGG': W_qGG,
'dfinvG0_G': self.dfinvG0_G}
if rank == 0:
pickle.dump(data, open('W_qGG.pckl', 'w'), -1)
return W_qGG
def print_bse(self):
printtxt = self.printtxt
if self.use_W:
printtxt('Number of q points : %d' %(self.nibzq))
printtxt('Number of frequency points : %d' %(self.Nw) )
printtxt('Number of pair orbitals : %d' %(self.nS) )
printtxt('Parallelization scheme:')
printtxt(' Total cpus : %d' %(world.size))
printtxt(' pair orb parsize : %d' %(self.Scomm.size))
return
def get_phi_qaGp(self):
N1_max = 0
N2_max = 0
natoms = len(self.calc.wfs.setups)
for id in range(natoms):
N1 = self.npw
N2 = self.calc.wfs.setups[id].ni**2
if N1 > N1_max:
N1_max = N1
if N2 > N2_max:
N2_max = N2
nbzq = self.nkpt
nbzq, nq_local, q_start, q_end = parallel_partition(
nbzq, world.rank, world.size, reshape=False)
phimax_qaGp = np.zeros((nq_local, natoms, N1_max, N2_max), dtype=complex)
for iq in range(nq_local):
self.printtxt('%d' %(iq))
q_c = self.bzq_qc[iq + q_start]
tmp_aGp = self.get_phi_aGp(q_c)
for id in range(natoms):
N1, N2 = tmp_aGp[id].shape
phimax_qaGp[iq, id, :N1, :N2] = tmp_aGp[id]
world.barrier()
# write to disk
filename = 'phi_qaGp'
if world.rank == 0:
w = Writer(filename)
w.dimension('nbzq', nbzq)
w.dimension('natoms', natoms)
w.dimension('nG', N1_max)
w.dimension('nii', N2_max)
w.add('phi_qaGp', ('nbzq', 'natoms', 'nG', 'nii',), dtype=complex)
for q in range(nbzq):
residual = nbzq % size
N_local = nbzq // size
if q < residual * (N_local + 1):
qrank = q // (N_local + 1)
else:
qrank = (q - residual * (N_local + 1)) // N_local + residual
if qrank == 0:
if world.rank == 0:
phi_aGp = phimax_qaGp[q - q_start]
else:
if world.rank == qrank:
phi_aGp = phimax_qaGp[q - q_start]
world.send(phi_aGp, 0, q)
elif world.rank == 0:
world.receive(phi_aGp, qrank, q)
if world.rank == 0:
w.fill(phi_aGp)
world.barrier()
if world.rank == 0:
w.close()
return
def load_phi_aGp(self, reader, iq):
phimax_aGp = np.array(reader.get('phi_qaGp', iq), complex)
phi_aGp = {}
natoms = len(phimax_aGp)
for a in range(natoms):
N1 = self.npw
N2 = self.calc.wfs.setups[a].ni**2
phi_aGp[a] = phimax_aGp[a, :N1, :N2]
return phi_aGp
def get_dielectric_function(self, filename='df.dat', readfile=None, overlap=True):
if self.epsilon_w is None:
self.initialize()
if readfile is None:
self.calculate()
self.printtxt('Calculating dielectric function.')
else:
data = pickle.load(open(readfile))
self.w_S = data['w_S']
self.v_SS = data['v_SS']
self.rhoG0_S = data['rhoG0_S']
self.printtxt('Finished reading H_SS.pckl')
w_S = self.w_S
v_SS = self.v_SS # v_SS[:,lamda]
rhoG0_S = self.rhoG0_S
focc_S = self.focc_S
# get overlap matrix
if not self.positive_w:
tmp = np.dot(v_SS.conj().T, v_SS )
overlap_SS = np.linalg.inv(tmp)
# get chi
epsilon_w = np.zeros(self.Nw, dtype=complex)
t0 = time()
A_S = np.dot(rhoG0_S, v_SS)
B_S = np.dot(rhoG0_S*focc_S, v_SS)
if not self.positive_w:
C_S = np.dot(B_S.conj(), overlap_SS.T) * A_S
else:
C_S = B_S.conj() * A_S
for iw in range(self.Nw):
tmp_S = 1. / (iw*self.dw - w_S + 1j*self.eta)
epsilon_w[iw] += np.dot(tmp_S, C_S)
epsilon_w *= - 4 * pi / np.inner(self.qq_v, self.qq_v) / self.vol
epsilon_w += 1
self.epsilon_w = epsilon_w
if rank == 0:
f = open(filename,'w')
for iw in range(self.Nw):
energy = iw * self.dw * Hartree
print >> f, energy, np.real(epsilon_w[iw]), np.imag(epsilon_w[iw])
f.close()
# Wait for I/O to finish
world.barrier()
"""Check f-sum rule."""
N1 = 0
for iw in range(self.Nw):
w = iw * self.dw
N1 += np.imag(epsilon_w[iw]) * w
N1 *= self.dw * self.vol / (2 * pi**2)
self.printtxt('')
self.printtxt('Sum rule:')
nv = self.nvalence
self.printtxt('N1 = %f, %f %% error' %(N1, (N1 - nv) / nv * 100) )
return epsilon_w
def get_e_h_density(self, lamda=None, filename=None):
if filename is not None:
self.load(filename)
self.initialize()
gd = self.gd
w_S = self.w_S
v_SS = self.v_SS
A_S = v_SS[:, lamda]
kq_k = self.kq_k
kd = self.kd
# Electron density
nte_R = gd.zeros()
for iS in range(self.nS_start, self.nS_end):
print 'electron density:', iS
k1, n1, m1 = self.Sindex_S3[iS]
ibzkpt1 = kd.bz2ibz_k[k1]
psitold_g = self.get_wavefunction(ibzkpt1, n1)
psit1_g = kd.transform_wave_function(psitold_g, k1)
for jS in range(self.nS):
k2, n2, m2 = self.Sindex_S3[jS]
if m1 == m2 and k1 == k2:
psitold_g = self.get_wavefunction(ibzkpt1, n2)
psit2_g = kd.transform_wave_function(psitold_g, k1)
nte_R += A_S[iS] * A_S[jS].conj() * psit1_g.conj() * psit2_g
# Hole density
nth_R = gd.zeros()
for iS in range(self.nS_start, self.nS_end):
print 'hole density:', iS
k1, n1, m1 = self.Sindex_S3[iS]
ibzkpt1 = kd.bz2ibz_k[kq_k[k1]]
psitold_g = self.get_wavefunction(ibzkpt1, m1)
psit1_g = kd.transform_wave_function(psitold_g, kq_k[k1])
for jS in range(self.nS):
k2, n2, m2 = self.Sindex_S3[jS]
if n1 == n2 and k1 == k2:
psitold_g = self.get_wavefunction(ibzkpt1, m2)
psit2_g = kd.transform_wave_function(psitold_g, kq_k[k1])
nth_R += A_S[iS] * A_S[jS].conj() * psit1_g * psit2_g.conj()
self.Scomm.sum(nte_R)
self.Scomm.sum(nth_R)
if rank == 0:
write('rho_e.cube',self.calc.atoms, format='cube', data=nte_R)
write('rho_h.cube',self.calc.atoms, format='cube', data=nth_R)
world.barrier()
return
def get_excitation_wavefunction(self, lamda=None,filename=None, re_c=None, rh_c=None):
""" garbage at the moment. come back later"""
if filename is not None:
self.load(filename)
self.initialize()
gd = self.gd
w_S = self.w_S
v_SS = self.v_SS
A_S = v_SS[:, lamda]
kq_k = self.kq_k
kd = self.kd
nx, ny, nz = self.nG[0], self.nG[1], self.nG[2]
nR = 9
nR2 = (nR - 1 ) // 2
if re_c is not None:
psith_R = gd.zeros(dtype=complex)
psith2_R = np.zeros((nR*nx, nR*ny, nz), dtype=complex)
elif rh_c is not None:
psite_R = gd.zeros(dtype=complex)
psite2_R = np.zeros((nR*nx, ny, nR*nz), dtype=complex)
else:
self.printtxt('No wavefunction output !')
return
for iS in range(self.nS_start, self.nS_end):
k, n, m = self.Sindex_S3[iS]
ibzkpt1 = kd.bz2ibz_k[k]
ibzkpt2 = kd.bz2ibz_k[kq_k[k]]
print 'hole wavefunction', iS, (k,n,m),A_S[iS]
psitold_g = self.get_wavefunction(ibzkpt1, n)
psit1_g = kd.transform_wave_function(psitold_g, k)
psitold_g = self.get_wavefunction(ibzkpt2, m)
psit2_g = kd.transform_wave_function(psitold_g, kq_k[k])
if re_c is not None:
# given electron position, plot hole wavefunction
tmp = A_S[iS] * psit1_g[re_c].conj() * psit2_g
psith_R += tmp
k_c = self.bzk_kc[k] + self.q_c
for i in range(nR):
for j in range(nR):
R_c = np.array([i-nR2, j-nR2, 0])
psith2_R[i*nx:(i+1)*nx, j*ny:(j+1)*ny, 0:nz] += \
tmp * np.exp(1j*2*pi*np.dot(k_c,R_c))
elif rh_c is not None:
# given hole position, plot electron wavefunction
tmp = A_S[iS] * psit1_g.conj() * psit2_g[rh_c] * self.expqr_g
psite_R += tmp
k_c = self.bzk_kc[k]
k_v = np.dot(k_c, self.bcell_cv)
for i in range(nR):
for j in range(nR):
R_c = np.array([i-nR2, 0, j-nR2])
R_v = np.dot(R_c, self.acell_cv)
assert np.abs(np.dot(k_v, R_v) - np.dot(k_c, R_c) * 2*pi).sum() < 1e-5
psite2_R[i*nx:(i+1)*nx, 0:ny, j*nz:(j+1)*nz] += \
tmp * np.exp(-1j*np.dot(k_v,R_v))
else:
pass
if re_c is not None:
self.Scomm.sum(psith_R)
self.Scomm.sum(psith2_R)
if rank == 0:
write('psit_h.cube',self.calc.atoms, format='cube', data=psith_R)
atoms = self.calc.atoms
shift = atoms.cell[0:2].copy()
positions = atoms.positions
atoms.cell[0:2] *= nR2
atoms.positions += shift * (nR2 - 1)
write('psit_bigcell_h.cube',atoms, format='cube', data=psith2_R)
elif rh_c is not None:
self.Scomm.sum(psite_R)
self.Scomm.sum(psite2_R)
if rank == 0:
write('psit_e.cube',self.calc.atoms, format='cube', data=psite_R)
atoms = self.calc.atoms
# shift = atoms.cell[0:2].copy()
positions = atoms.positions
atoms.cell[0:2] *= nR2
# atoms.positions += shift * (nR2 - 1)
write('psit_bigcell_e.cube',atoms, format='cube', data=psite2_R)
else:
pass
world.barrier()
return
def load(self, filename):
data = pickle.load(open(filename))
self.w_S = data['w_S']
self.v_SS = data['v_SS']
self.printtxt('Read successfully!')
def save(self, filename):
"""Dump essential data"""
data = {'w_S' : self.w_S,
'v_SS' : self.v_SS}
if rank == 0:
pickle.dump(data, open(filename, 'w'), -1)
world.barrier()
|
gpl-3.0
| -5,935,395,398,127,700,000
| 35.318379
| 114
| 0.456447
| false
| 3.115194
| false
| false
| false
|
edx-solutions/xblock-group-project
|
group_project/group_project.py
|
1
|
40500
|
# -*- coding: utf-8 -*-
#
# Imports ###########################################################
import json
import logging
import textwrap
from datetime import datetime, timedelta
from io import StringIO
from xml.etree import ElementTree as ET
import pytz
import webob
from django.conf import settings
from django.utils import html
from django.utils.translation import ugettext as _
from lxml import etree
from pkg_resources import resource_filename
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import Dict, Float, Integer, Scope, String
from xblock.fragment import Fragment
from .api_error import ApiError
from .group_activity import GroupActivity
from .project_api import ProjectAPI
from .upload_file import UploadFile
from .utils import AttrDict, load_resource, render_template
ALLOWED_OUTSIDER_ROLES = getattr(settings, "ALLOWED_OUTSIDER_ROLES", None)
if ALLOWED_OUTSIDER_ROLES is None:
ALLOWED_OUTSIDER_ROLES = ["assistant"]
try:
from edx_notifications.data import NotificationMessage
except:
# Notifications is an optional runtime configuration, so it may not be available for import
pass
# Globals ###########################################################
log = logging.getLogger(__name__)
# Classes ###########################################################
def make_key(*args):
return ":".join([str(a) for a in args])
class OutsiderDisallowedError(Exception):
def __init__(self, detail):
self.value = detail
super(OutsiderDisallowedError, self).__init__()
def __str__(self):
return "Outsider Denied Access: {}".format(self.value)
def __unicode__(self):
return "Outsider Denied Access: {}".format(self.value)
@XBlock.wants('notifications')
@XBlock.wants('courseware_parent_info')
class GroupProjectBlock(XBlock):
"""
XBlock providing a group activity project for a group of students to collaborate upon.
"""
completion_mode = XBlockCompletionMode.EXCLUDED
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
scope=Scope.settings,
default="Group Project"
)
weight = Float(
display_name="Weight",
help="This is the maximum score that the user receives when he/she successfully completes the problem",
scope=Scope.settings,
default=100.0
)
group_reviews_required_count = Integer(
display_name="Reviews Required Minimum",
help="The minimum number of group-reviews that should be applied to a set of submissions (set to 0 to be 'TA Graded')",
scope=Scope.settings,
default=3
)
user_review_count = Integer(
display_name="User Reviews Required Minimum",
help="The minimum number of other-group reviews that an individual user should perform",
scope=Scope.settings,
default=1
)
item_state = Dict(
help="JSON payload for assessment values",
scope=Scope.user_state
)
with open(resource_filename(__name__, 'res/default.xml'), "r") as default_xml_file:
default_xml = default_xml_file.read()
data = String(
display_name="",
help="XML contents to display for this module",
scope=Scope.content,
default=textwrap.dedent(default_xml)
)
has_score = True
_project_api = None
def _confirm_outsider_allowed(self):
granted_roles = [r["role"] for r in self.project_api.get_user_roles_for_course(self.user_id, self.course_id)]
for allowed_role in ALLOWED_OUTSIDER_ROLES:
if allowed_role in granted_roles:
return True
raise OutsiderDisallowedError("User does not have an allowed role")
_known_real_user_ids = {}
def real_user_id(self, anonymous_student_id):
if anonymous_student_id not in self._known_real_user_ids:
self._known_real_user_ids[anonymous_student_id] = self.xmodule_runtime.get_real_user(anonymous_student_id).id
return self._known_real_user_ids[anonymous_student_id]
@property
def milestone_dates(self):
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
return group_activity.milestone_dates
@property
def project_api(self):
if self._project_api is None:
api_server = "http://127.0.0.1:18000"
if hasattr(settings, 'API_LOOPBACK_ADDRESS'):
api_server = settings.API_LOOPBACK_ADDRESS
self._project_api = ProjectAPI(api_server)
return self._project_api
@property
def user_id(self):
try:
return self.real_user_id(self.xmodule_runtime.anonymous_student_id)
except:
return None
_workgroup = None
@property
def workgroup(self):
if self._workgroup is None:
try:
user_prefs = self.project_api.get_user_preferences(self.user_id)
if "TA_REVIEW_WORKGROUP" in user_prefs:
self._confirm_outsider_allowed()
self._workgroup = self.project_api.get_workgroup_by_id(user_prefs["TA_REVIEW_WORKGROUP"])
else:
self._workgroup = self.project_api.get_user_workgroup_for_course(
self.user_id,
self.course_id
)
except OutsiderDisallowedError:
raise
except:
self._workgroup = {
"id": "0",
"users": [],
}
return self._workgroup
@property
def is_group_member(self):
return self.user_id in [u["id"] for u in self.workgroup["users"]]
@property
def is_admin_grader(self):
return not self.is_group_member
@property
def content_id(self):
try:
return str(self.scope_ids.usage_id)
except:
return self.id
@property
def course_id(self):
try:
return str(self.xmodule_runtime.course_id)
except:
return self.xmodule_runtime.course_id
def student_view(self, context):
"""
Player view, displayed to the student
"""
try:
workgroup = self.workgroup
except OutsiderDisallowedError as ode:
error_fragment = Fragment()
error_fragment.add_content(render_template('/templates/html/loading_error.html', {'error_message': str(ode)}))
error_fragment.add_javascript(load_resource('public/js/group_project_error.js'))
error_fragment.initialize_js('GroupProjectError')
return error_fragment
user_id = self.user_id
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
try:
group_activity.update_submission_data(
self.project_api.get_latest_workgroup_submissions_by_id(workgroup["id"])
)
except:
pass
if self.is_group_member:
try:
team_members = [self.project_api.get_user_details(tm["id"]) for tm in workgroup["users"] if user_id != int(tm["id"])]
except:
team_members = []
try:
assess_groups = self.project_api.get_workgroups_to_review(user_id, self.course_id, self.content_id)
except:
assess_groups = []
else:
team_members = []
assess_groups = [workgroup]
context = {
"group_activity": group_activity,
"team_members": json.dumps(team_members),
"assess_groups": json.dumps(assess_groups),
"ta_graded": (self.group_reviews_required_count < 1),
}
fragment = Fragment()
fragment.add_content(
render_template('/templates/html/group_project.html', context))
fragment.add_css(load_resource('public/css/group_project.css'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/vendor/jquery.ui.widget.js'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/vendor/jquery.fileupload.js'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/vendor/jquery.iframe-transport.js'))
fragment.add_javascript(load_resource('public/js/group_project.js'))
fragment.initialize_js('GroupProjectBlock')
return fragment
def studio_view(self, context):
"""
Editing view in Studio
"""
fragment = Fragment()
fragment.add_content(render_template('/templates/html/group_project_edit.html', {
'self': self,
}))
fragment.add_css(load_resource('public/css/group_project_edit.css'))
fragment.add_javascript(
load_resource('public/js/group_project_edit.js'))
fragment.initialize_js('GroupProjectEditBlock')
return fragment
def assign_grade_to_group(self, group_id, grade_value):
self.project_api.set_group_grade(
group_id,
self.course_id,
self.content_id,
grade_value,
self.weight
)
# Emit analytics event...
self.runtime.publish(
self,
"group_activity.final_grade",
{
"grade_value": grade_value,
"group_id": group_id,
"content_id": self.content_id,
}
)
notifications_service = self.runtime.service(self, 'notifications')
if notifications_service:
self.fire_grades_posted_notification(group_id, notifications_service)
def calculate_grade(self, group_id):
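# Grading sketch (inferred from the code below): average each reviewer's answers
# to the grade questions; if a group reviewer's set is incomplete, fall back to
# the mean of any complete admin/TA gradesets; the final group grade is the
# rounded mean of the per-reviewer averages.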
def mean(value_array):
numeric_values = [float(v) for v in value_array]
return float(sum(numeric_values)/len(numeric_values))
review_item_data = self.project_api.get_workgroup_review_items_for_group(group_id, self.content_id)
review_item_map = {make_key(r['question'], self.real_user_id(r['reviewer'])) : r['answer'] for r in review_item_data}
all_reviewer_ids = set([self.real_user_id(r['reviewer']) for r in review_item_data])
group_reviewer_ids = [u["id"] for u in self.project_api.get_workgroup_reviewers(group_id, self.content_id)]
admin_reviewer_ids = [ar_id for ar_id in all_reviewer_ids if ar_id not in group_reviewer_ids]
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
def get_user_grade_value_list(user_id):
user_grades = []
for q_id in group_activity.grade_questions:
user_value = review_item_map.get(make_key(q_id, user_id), None)
if user_value is None:
# if any are incomplete, we consider the whole set to be unusable
return None
else:
user_grades.append(user_value)
return user_grades
admin_provided_grades = None
if len(admin_reviewer_ids) > 0:
admin_provided_grades = []
# Only include complete admin gradesets
admin_reviewer_grades = [
arg
for arg in [get_user_grade_value_list(admin_id) for admin_id in admin_reviewer_ids]
if arg
]
admin_grader_count = len(admin_reviewer_grades)
if admin_grader_count > 1:
for idx in range(len(group_activity.grade_questions)):
admin_provided_grades.append(mean([adm[idx] for adm in admin_reviewer_grades]))
elif admin_grader_count > 0:
admin_provided_grades = admin_reviewer_grades[0]
user_grades = {}
if len(group_reviewer_ids) > 0:
for r_id in group_reviewer_ids:
this_reviewers_grades = get_user_grade_value_list(r_id)
if this_reviewers_grades is None:
if admin_provided_grades:
this_reviewers_grades = admin_provided_grades
else:
return None
user_grades[r_id] = this_reviewers_grades
elif admin_provided_grades:
group_reviewer_ids = [self.user_id]
user_grades[self.user_id] = admin_provided_grades
else:
return None
# Okay, if we've gotten here, we have a complete set of marks from which to calculate the grade
reviewer_grades = [mean(user_grades[r_id]) for r_id in group_reviewer_ids if len(user_grades[r_id]) > 0]
group_grade = round(mean(reviewer_grades)) if len(reviewer_grades) > 0 else None
return group_grade
def mark_complete_stage(self, user_id, stage):
try:
self.project_api.mark_as_complete(
self.course_id,
self.content_id,
user_id,
stage
)
except ApiError as e:
# 409 indicates that the completion record already existed
# That's ok in this case
if e.code != 409:
raise
def update_upload_complete(self):
for u in self.workgroup["users"]:
self.mark_complete_stage(u["id"], "upload")
def graded_and_complete(self, group_id):
workgroup = self.project_api.get_workgroup_by_id(group_id)
for u in workgroup["users"]:
self.mark_complete_stage(u["id"], None)
def evaluations_complete(self):
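# True only when this user has answered every required peer-review question
# for every other member of their workgroup (inferred from the checks below).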
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
peer_review_components = [c for c in group_activity.activity_components if c.peer_reviews]
peer_review_questions = []
for prc in peer_review_components:
for sec in prc.peer_review_sections:
peer_review_questions.extend([q.id for q in sec.questions if q.required])
group_peer_items = self.project_api.get_peer_review_items_for_group(self.workgroup['id'], self.content_id)
my_feedback = {make_key(pri["user"], pri["question"]): pri["answer"] for pri in group_peer_items if pri['reviewer'] == self.xmodule_runtime.anonymous_student_id}
my_peers = [u for u in self.workgroup["users"] if u["id"] != self.user_id]
for peer in my_peers:
for q_id in peer_review_questions:
k = make_key(peer["id"], q_id)
if not k in my_feedback:
return False
if my_feedback[k] is None:
return False
if my_feedback[k] == '':
return False
return True
def grading_complete(self):
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
group_review_components = [c for c in group_activity.activity_components if c.other_group_reviews]
group_review_questions = []
for prc in group_review_components:
for sec in prc.other_group_sections:
group_review_questions.extend([q.id for q in sec.questions if q.required])
group_review_items = []
assess_groups = self.project_api.get_workgroups_to_review(self.user_id, self.course_id, self.content_id)
for assess_group in assess_groups:
group_review_items.extend(self.project_api.get_workgroup_review_items_for_group(assess_group["id"], self.content_id))
my_feedback = {make_key(pri["workgroup"], pri["question"]): pri["answer"] for pri in group_review_items if pri['reviewer'] == self.xmodule_runtime.anonymous_student_id}
for assess_group in assess_groups:
for q_id in group_review_questions:
k = make_key(assess_group["id"], q_id)
if not k in my_feedback:
return False
if my_feedback[k] is None:
return False
if my_feedback[k] == '':
return False
return True
@XBlock.json_handler
def studio_submit(self, submissions, suffix=''):
self.display_name = submissions['display_name']
xml_content = submissions['data']
max_score = submissions['max_score']
group_reviews_required_count = submissions['group_reviews_required_count']
user_review_count = submissions['user_review_count']
if not max_score:
# empty = default
max_score = 100
else:
try:
# not an integer, then default
max_score = int(max_score)
except:
max_score = 100
self.weight = max_score
try:
group_reviews_required_count = int(group_reviews_required_count)
except:
group_reviews_required_count = 3
self.group_reviews_required_count = group_reviews_required_count
try:
user_review_count = int(user_review_count)
except:
user_review_count = 1
self.user_review_count = user_review_count
try:
etree.parse(StringIO(xml_content))
self.data = xml_content
except etree.XMLSyntaxError as e:
return {
'result': 'error',
'message': e.message
}
return {
'result': 'success',
}
@XBlock.json_handler
def submit_peer_feedback(self, submissions, suffix=''):
try:
peer_id = submissions["peer_id"]
del submissions["peer_id"]
# Then something like this needs to happen
self.project_api.submit_peer_review_items(
self.xmodule_runtime.anonymous_student_id,
peer_id,
self.workgroup['id'],
self.content_id,
submissions,
)
if self.evaluations_complete():
self.mark_complete_stage(self.user_id, "evaluation")
except Exception as e:
return {
'result': 'error',
'msg': e.message,
}
return {
'result': 'success',
'msg': _('Thanks for your feedback'),
}
@XBlock.json_handler
def submit_other_group_feedback(self, submissions, suffix=''):
try:
group_id = submissions["group_id"]
del submissions["group_id"]
self.project_api.submit_workgroup_review_items(
self.xmodule_runtime.anonymous_student_id,
group_id,
self.content_id,
submissions
)
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
for q_id in group_activity.grade_questions:
if q_id in submissions:
# Emit analytics event...
self.runtime.publish(
self,
"group_activity.received_grade_question_score",
{
"question": q_id,
"answer": submissions[q_id],
"reviewer_id": self.xmodule_runtime.anonymous_student_id,
"is_admin_grader": self.is_admin_grader,
"group_id": group_id,
"content_id": self.content_id,
}
)
grade_value = self.calculate_grade(group_id)
if grade_value:
self.assign_grade_to_group(group_id, grade_value)
self.graded_and_complete(group_id)
if self.is_group_member and self.grading_complete():
self.mark_complete_stage(self.user_id, "grade")
except Exception as e:
return {
'result': 'error',
'msg': e.message,
}
return {
'result': 'success',
'msg': _('Thanks for your feedback'),
}
@XBlock.handler
def load_peer_feedback(self, request, suffix=''):
peer_id = request.GET["peer_id"]
feedback = self.project_api.get_peer_review_items(
self.xmodule_runtime.anonymous_student_id,
peer_id,
self.workgroup['id'],
self.content_id,
)
# pivot the data to show question -> answer
results = {pi['question']: pi['answer'] for pi in feedback}
return webob.response.Response(body=json.dumps(results))
@XBlock.handler
def load_other_group_feedback(self, request, suffix=''):
group_id = request.GET["group_id"]
feedback = self.project_api.get_workgroup_review_items(
self.xmodule_runtime.anonymous_student_id,
group_id,
self.content_id
)
# pivot the data to show question -> answer
results = {ri['question']: ri['answer'] for ri in feedback}
return webob.response.Response(body=json.dumps(results))
@XBlock.handler
def load_my_peer_feedback(self, request, suffix=''):
user_id = self.user_id
feedback = self.project_api.get_user_peer_review_items(
user_id,
self.workgroup['id'],
self.content_id,
)
results = {}
for item in feedback:
if item['question'] in results:
results[item['question']].append(html.escape(item['answer']))
else:
results[item['question']] = [html.escape(item['answer'])]
return webob.response.Response(body=json.dumps(results))
@XBlock.handler
def load_my_group_feedback(self, request, suffix=''):
workgroup_id = self.workgroup['id']
feedback = self.project_api.get_workgroup_review_items_for_group(
workgroup_id,
self.content_id,
)
results = {}
for item in feedback:
if item['question'] in results:
results[item['question']].append(html.escape(item['answer']))
else:
results[item['question']] = [html.escape(item['answer'])]
final_grade = self.calculate_grade(workgroup_id)
if final_grade:
results["final_grade"] = [final_grade]
return webob.response.Response(body=json.dumps(results))
@XBlock.handler
def upload_submission(self, request, suffix=''):
response_data = {"message": _("File(s) successfully submitted")}
failure_code = 0
try:
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
context = {
"user_id": self.user_id,
"group_id": self.workgroup['id'],
"project_api": self.project_api,
"course_id": self.course_id
}
upload_files = [UploadFile(request.params[s.id].file, s.id, context)
for s in group_activity.submissions if s.id in request.params]
# Save the files first
for uf in upload_files:
try:
uf.save_file()
except Exception as save_file_error:
original_message = save_file_error.message if hasattr(save_file_error, "message") else ""
save_file_error.message = _("Error storing file {} - {}").format(uf.file.name, original_message)
raise
# They all got saved... note the submissions
at_least_one_success = False
for uf in upload_files:
try:
uf.submit()
# Emit analytics event...
self.runtime.publish(
self,
"group_activity.received_submission",
{
"submission_id": uf.submission_id,
"filename": uf.file.name,
"content_id": self.content_id,
"group_id": self.workgroup['id'],
"user_id": self.user_id,
}
)
at_least_one_success = True
except Exception as save_record_error:
original_message = save_record_error.message if hasattr(save_record_error, "message") else ""
save_record_error.message = _("Error recording file information {} - {}").format(uf.file.name, original_message)
raise
if at_least_one_success:
# See if the xBlock Notification Service is available, and - if so -
# dispatch a notification to the entire workgroup that a file has been uploaded
# Note that the NotificationService can be disabled, so it might not be available
# in the list of services
notifications_service = self.runtime.service(self, 'notifications')
if notifications_service:
self.fire_file_upload_notification(notifications_service)
response_data.update({uf.submission_id : uf.file_url for uf in upload_files})
group_activity.update_submission_data(
self.project_api.get_latest_workgroup_submissions_by_id(self.workgroup['id'])
)
if group_activity.has_all_submissions:
self.update_upload_complete()
except Exception as e:
log.exception(e)
failure_code = 500
if isinstance(e, ApiError):
failure_code = e.code
if not hasattr(e, "message"):
e.message = _("Error uploading at least one file")
response_data.update({"message": e.message})
response = webob.response.Response(body=json.dumps(response_data))
if failure_code:
response.status_code = failure_code
return response
@XBlock.handler
def other_submission_links(self, request, suffix=''):
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
group_id = request.GET["group_id"]
group_activity.update_submission_data(
self.project_api.get_latest_workgroup_submissions_by_id(group_id)
)
html_output = render_template('/templates/html/review_submissions.html', {"group_activity": group_activity})
return webob.response.Response(body=json.dumps({"html":html_output}))
@XBlock.handler
def refresh_submission_links(self, request, suffix=''):
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
group_activity.update_submission_data(
self.project_api.get_latest_workgroup_submissions_by_id(self.workgroup['id'])
)
html_output = render_template('/templates/html/submission_links.html', {"group_activity": group_activity})
return webob.response.Response(body=json.dumps({"html":html_output}))
def get_courseware_info(self, courseware_parent_info_service):
activity_name = self.display_name
activity_location = None
stage_name = self.display_name
stage_location = None
try:
if courseware_parent_info_service:
# First get Unit (first parent)
stage_info = courseware_parent_info_service.get_parent_info(
self.location
)
stage_location = stage_info['location']
stage_name = stage_info['display_name']
# Then get Sequence (second parent)
activity_courseware_info = courseware_parent_info_service.get_parent_info(
stage_location
)
activity_name = activity_courseware_info['display_name']
activity_location = activity_courseware_info['location']
project_courseware_info = courseware_parent_info_service.get_parent_info(
activity_location
)
project_name = project_courseware_info['display_name']
project_location = project_courseware_info['location']
except Exception as ex:
# If we can't look this up, log the error and just use the default,
# which is our display_name
log.exception(ex)
return {
'stage_name': stage_name,
'stage_location': stage_location,
'activity_name': activity_name,
'activity_location': activity_location,
'project_name': project_name,
'project_location': project_location,
}
def fire_file_upload_notification(self, notifications_service):
try:
# this NotificationType is registered in the list of default Open edX Notifications
msg_type = notifications_service.get_notification_type('open-edx.xblock.group-project.file-uploaded')
workgroup_user_ids = []
uploader_username = ''
for user in self.workgroup['users']:
# don't send to ourselves
if user['id'] != self.user_id:
workgroup_user_ids.append(user['id'])
else:
uploader_username = user['username']
# get the activity name which is simply our hosting
# Sequence's Display Name, so call out to a new xBlock
# runtime Service
courseware_info = self.get_courseware_info(self.runtime.service(self, 'courseware_parent_info'))
activity_name = courseware_info['activity_name']
activity_location = courseware_info['activity_location']
msg = NotificationMessage(
msg_type=msg_type,
namespace=str(self.course_id),
payload={
'_schema_version': 1,
'action_username': uploader_username,
'activity_name': activity_name,
}
)
#
# add in all the context parameters we'll need to
# generate a URL back to the website that will
# present the new course announcement
#
# IMPORTANT: This can be changed to msg.add_click_link() if we
# have a particular URL that we wish to use. In the initial use case,
# we need to make the link point to a different front end website
# so we need to resolve these links at dispatch time
#
msg.add_click_link_params({
'course_id': str(self.course_id),
'activity_location': str(activity_location) if activity_location else '',
})
# NOTE: We're not using Celery here since we are expecting that we
# will have only a very small handful of workgroup users
notifications_service.bulk_publish_notification_to_users(
workgroup_user_ids,
msg
)
except Exception as ex:
# While we *should* send notification, if there is some
# error here, we don't want to blow the whole thing up.
# So log it and continue....
log.exception(ex)
def fire_grades_posted_notification(self, group_id, notifications_service):
try:
# this NotificationType is registered in the list of default Open edX Notifications
msg_type = notifications_service.get_notification_type('open-edx.xblock.group-project.grades-posted')
# get the activity name which is simply our hosting
# Sequence's Display Name, so call out to a new xBlock
# runtime Service
courseware_info = self.get_courseware_info(self.runtime.service(self, 'courseware_parent_info'))
activity_name = courseware_info['activity_name']
activity_location = courseware_info['activity_location']
msg = NotificationMessage(
msg_type=msg_type,
namespace=str(self.course_id),
payload={
'_schema_version': 1,
'activity_name': activity_name,
}
)
#
# add in all the context parameters we'll need to
# generate a URL back to the website that will
# present the new course announcement
#
# IMPORTANT: This can be changed to msg.add_click_link() if we
# have a particular URL that we wish to use. In the initial use case,
# we need to make the link point to a different front end website
# so we need to resolve these links at dispatch time
#
msg.add_click_link_params({
'course_id': str(self.course_id),
'activity_location': str(activity_location) if activity_location else '',
})
# Bulk publish to the 'group_project_workgroup' user scope
notifications_service.bulk_publish_notification_to_scope(
'group_project_workgroup',
{
# I think self.workgroup['id'] is a string version of an integer
'workgroup_id': group_id,
},
msg
)
except Exception as ex:
# While we *should* send notification, if there is some
# error here, we don't want to blow the whole thing up.
# So log it and continue....
log.exception(ex)
def _get_component_timer_name(self, component, timer_name_suffix):
return '{location}-{component}-{timer_name_suffix}'.format(
location=self.location,
component=component.id,
timer_name_suffix=timer_name_suffix
)
def _set_activity_timed_notification(self, course_id, activity, msg_type, component, milestone_date, send_at_date, services, timer_name_suffix):
component_name = component.name
notifications_service = services.get('notifications')
courseware_parent_info = services.get('courseware_parent_info')
courseware_info = self.get_courseware_info(courseware_parent_info)
activity_name = courseware_info['activity_name']
activity_location = courseware_info['activity_location']
project_location = courseware_info['project_location']
milestone_date_tz = milestone_date.replace(tzinfo=pytz.UTC)
send_at_date_tz = send_at_date.replace(tzinfo=pytz.UTC)
msg = NotificationMessage(
msg_type=notifications_service.get_notification_type(msg_type),
namespace=str(course_id),
payload={
'_schema_version': 1,
'activity_name': activity_name,
'stage': component_name,
'due_date': milestone_date_tz.strftime('%-m/%-d/%-y'),
}
)
#
# add in all the context parameters we'll need to
# generate a URL back to the website that will
# present the new course announcement
#
# IMPORTANT: This can be changed to msg.add_click_link() if we
# have a particular URL that we wish to use. In the initial use case,
# we need to make the link point to a different front end website
# so we need to resolve these links at dispatch time
#
msg.add_click_link_params({
'course_id': str(course_id),
'activity_location': str(activity_location),
})
notifications_service.publish_timed_notification(
msg=msg,
send_at=send_at_date_tz,
# send to all students participating in this project
scope_name='group_project_participants',
scope_context={
'course_id': str(course_id),
'content_id': str(project_location),
},
timer_name=self._get_component_timer_name(component, timer_name_suffix),
ignore_if_past_due=True # don't send if we're already late!
)
def on_studio_published(self, course_id, services):
"""
A hook into when this xblock is published in Studio. When we are published we should
register a Notification to be sent on key dates.
"""
try:
log.info('GroupProjectBlock.on_published() on location = {}'.format(self.location))
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
# see if we are running in an environment which has Notifications enabled
notifications_service = services.get('notifications')
if notifications_service:
# set (or update) Notification timed message based on
# the current key dates
for component in group_activity.activity_components:
# if the component has an opening date, then send a msg at that time
if component.open_date:
self._set_activity_timed_notification(
course_id,
group_activity,
'open-edx.xblock.group-project.stage-open',
component,
datetime.combine(component.open_date, datetime.min.time()),
datetime.combine(component.open_date, datetime.min.time()),
services,
'open'
)
# if the component has a close date, then send a msg at that time
if component.close_date:
self._set_activity_timed_notification(
course_id,
group_activity,
'open-edx.xblock.group-project.stage-due',
component,
datetime.combine(component.close_date, datetime.min.time()),
datetime.combine(component.close_date, datetime.min.time()),
services,
'due'
)
# and also send a notice 3 days earlier
self._set_activity_timed_notification(
course_id,
group_activity,
'open-edx.xblock.group-project.stage-due',
component,
datetime.combine(component.close_date, datetime.min.time()),
datetime.combine(component.close_date, datetime.min.time()) - timedelta(days=3),
services,
'coming-due'
)
except Exception as ex:
log.exception(ex)
def on_before_studio_delete(self, course_id, services):
"""
A hook into when this xblock is deleted in Studio, for xblocks to do any lifecycle
management
"""
log.info('GroupProjectBlock.on_before_delete() on location = {}'.format(self.location))
try:
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
# see if we are running in an environment which has Notifications enabled
notifications_service = services.get('notifications')
if notifications_service:
# If we are being deleted, then we should remove any NotificationTimers that
# may have been registered before
for component in group_activity.activity_components:
notifications_service.cancel_timed_notification(
self._get_component_timer_name(component, 'open')
)
notifications_service.cancel_timed_notification(
self._get_component_timer_name(component, 'due')
)
notifications_service.cancel_timed_notification(
self._get_component_timer_name(component, 'coming-due')
)
except Exception as ex:
log.exception(ex)
|
agpl-3.0
| -3,804,872,931,286,464
| 37.793103
| 176
| 0.565062
| false
| 4.379325
| false
| false
| false
|
themattrix/python-simian
|
setup.py
|
1
|
1046
|
from setuptools import setup
setup(
name='simian',
version='2.0.0',
packages=('simian',),
url='https://github.com/themattrix/python-simian',
license='MIT',
author='Matthew Tardiff',
author_email='mattrix@gmail.com',
install_requires=('mock', 'contextlib2'),
tests_require=('nose', 'flake8'),
description=(
'A decorator for easily mocking out multiple dependencies by '
'monkey-patching.'),
classifiers=(
'Topic :: Software Development :: Testing',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'))
|
mit
| 159,269,272,641,297,660
| 37.740741
| 70
| 0.605163
| false
| 4.322314
| false
| false
| false
|
animalize/tz2txt
|
tz2txt/datamachine.py
|
1
|
18988
|
# coding=utf-8
## web to internal state 1 (Reply)
## tz = web_to_internal(url, pg_count)
##
## internal state 1 to BP (arranged) text
## internal_to_bp(tz)
##
## ----------------------------
##
## BP text to internal state 2 (BPReply)
## lst = bp_to_internal2(infile)
##
## process internal state 2
## lst = process_internal2(lst)
##
## internal state 2 to BP text
## internal2_to_bp(lst)
##
## ----------------------------
##
## BP text to final output
## bp_to_final(infile, keep_discard=True, label=0)
##
## ----------------------------
##
## statistics
## statistic(all_list)
from io import StringIO
from datetime import datetime
import itertools
try:
import winsound
except:
winsound = None
import color
from red import red
from fetcher import *
from tzdatastruct import *
from BaseProcessor import *
from AbPageParser import *
def save_print(print_str):
try:
print(print_str)
except UnicodeEncodeError:
for char in print_str:
try:
print(char, end='')
except:
print('?', end='')
print()
# Print the BP text header info
def print_bp_head(all_list):
for one in all_list:
if isinstance(one, str):
if one.startswith('<tiezi>'):
print_str = one[len('<tiezi>'):]
save_print(print_str)
elif isinstance(one, BPReply):
break
print()
# Statistics
def statistic(all_list):
processor = BaseProcessor()
rlist = [one for one in all_list if isinstance(one, BPReply)]
processor.set_rlist(rlist)
processor.statistic()
def process_internal2(all_list):
'''Process intermediate form 2'''
def get_processor(all_list):
'''Get the processor'''
processor = None
if all_list:
p = red.re_dict(r'<processor:\s*(.*?)\s*>')
m = p.search(all_list[0])
if m:
local_processor = m.group(1)
processor = BaseProcessor.get_processor(local_processor)
return processor
# Find the processor
processor = get_processor(all_list)
if not processor:
print('编排文本的首行没有指定自动处理器,不做处理\n例如:<processor: sample>')
return all_list
rlist = [one for one in all_list if isinstance(one, BPReply)]
print('共有{0}条回复,选择了{1}条回复。\n'.format(
len(rlist),
sum(1 for i in rlist if i.select)
)
)
processor.set_rlist(rlist)
processor.process()
print('共有{0}条回复,选择了{1}条回复。'.format(
len(rlist),
sum(1 for i in rlist if i.select and i.suggest)
)
)
return all_list
def reply_to_bp(reply, select):
'''Reply -> BP text (duck typing)'''
mark = '█' if select else ''
t = ('<time>◇◆◇◆◇◆◇◆◇◆◇ <',
reply.time.strftime('%Y-%m-%d %H:%M:%S %w'),
'> ◇◆◇◆◇◆◇◆◇◆◇\n',
reply.text,
'\n<mark>══════保留标记:', mark
)
return ''.join(t)
def internal2_to_bp(all_list):
'''Intermediate form 2 to BP text'''
def to_bp(obj):
if isinstance(obj, str):
return obj
elif isinstance(obj, BPReply):
s = obj.select and obj.suggest
return reply_to_bp(obj, s)
if not all_list:
print('无法处理,请检查输入文件是否为编排文本')
return None
write_list = (to_bp(one) for one in all_list)
output = StringIO('\n'.join(write_list))
return output
def bp_to_internal2(infile):
'''BP text to intermediate form 2'''
all_list = list()
pattern = red.re_dict(r'<(\d{4}-\d\d-\d\d\s+\d\d:\d\d:\d\d)')
dt = lambda s:datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
temp = list()
temp_date = None
in_reply = False
for line in infile.readlines():
line = line.rstrip('\n')
if line.startswith('<time>'):
if in_reply == True:
print('格式错误:回复文本的前后包括标志不配对。\n',
'丢失<mark>行')
break
m = pattern.search(line)
if not m:
print('无法解析日期')
break
temp_date = dt(m.group(1))
in_reply = True
elif line.startswith('<mark>'):
if in_reply == False:
print('格式错误:回复文本的前后包括标志不配对。\n',
'丢失<time>行')
break
if line.endswith('█'):
select = True
else:
select = False
# Append the reply
rpl = BPReply(temp_date, '\n'.join(temp), select)
all_list.append(rpl)
temp.clear()
in_reply = False
elif in_reply:
temp.append(line)
elif not in_reply:
all_list.append(line)
infile.close()
if in_reply == True:
print('格式错误:最后一个回复文本的前后包括标志不配对。')
return all_list
def count_chinese(string):
'''Count Chinese characters, excluding Chinese punctuation'''
count = 0
for c in string:
c = ord(c)
# CJK Unified Ideographs: 20,950
# CJK Unified Ideographs Extension A: 6,582
# CJK Compatibility Ideographs: 472
# CJK Unified Ideographs Extensions B-E: 52,844
if 0x4E00 <= c <= 0x9FFF or \
0x3400 <= c <= 0x4DBF or \
0xF900 <= c <= 0xFAFF or \
0x20000 <= c <= 0x2EBEF:
count += 1
return count
def bp_to_final(infile, keep_discard=True, label=0):
'''Compile: BP text to final text plus discarded text'''
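# label=0: plain output; label=1: insert page markers for pages with kept
# replies; label=2: prefix each kept reply with a floor header showing its
# time and page number (behaviour inferred from the branches below).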
class placeholder:
def __init__(self, posi=0, pagenum=0, show=False):
self.posi = posi
self.pagenum = pagenum
self.show = show
def is_not_empty(lst):
for i in lst:
yield i.strip() != ''
info_list = list()
holder_list = [placeholder()]
text_list = list()
abandon_list = list()
pickcount, allcount = 0, 0
# Used to replace [img]http://img3.laibafile.cn/p/m/1234567.jpg[/img]
# with an image placeholder like 【图片:1234567.jpg】
picr = (r'\[img\s*(\d+|)\].*?\[/img\]')
pattern = red.re_dict(picr)
# Extract the page number
re_pagenum = red.re_dict(r'^<page>页号:\s*(\d+)\s*$')
# Extract the timestamp
p_time = (r'^<time>[^<]*<\d\d(\d\d-\d{1,2}-\d{1,2})\s+'
r'(\d{1,2}:\d{1,2})')
re_time = red.re_dict(p_time)
# Read the BP text
in_reply = False
temp = list()
current_page = 0
current_time = ''
for line in infile.readlines():
if line.startswith('<time>'):
if in_reply == True:
print('格式错误:回复文本的前后包括标志不配对。\n',
'丢失<mark>行')
break
in_reply = True
# current_time
if label == 2:
m = re_time.search(line)
if m:
current_time = m.group(1) + ' ' + m.group(2)
else:
current_time = ''
elif line.startswith('<mark>'):
if in_reply == False:
print('格式错误:回复文本的前后包括标志不配对。\n',
'丢失<time>行')
break
if line.endswith('█\n') or line.endswith('█'):
pickcount += 1
if label == 0:
pass
elif label == 1:
holder_list[-1].show = True
elif label == 2:
floor_label = ('№.%d ☆☆☆'
' 发表于%s P.%d '
'☆☆☆\n'
'-------------------------'
'-------------------------'
'\n')
floor_label = floor_label % \
(pickcount, current_time, current_page)
text_list.append(floor_label)
text_list.extend(temp)
text_list.append('\n')
elif any(is_not_empty(temp)):
abandon_list.extend(temp)
abandon_list.append('∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞\n\n')
temp.clear()
allcount += 1
in_reply = False
elif in_reply:
line = pattern.sub(r'【一张图片\1】', line)
temp.append(line)
# Because of the previous elif, the branches below only run when not in_reply
elif not text_list and not abandon_list and \
line.startswith('<tiezi>'):
info_list.append(line[len('<tiezi>'):])
elif label != 0:
m = re_pagenum.search(line)
if m:
current_page = int(m.group(1))
if label == 1:
text_list.append('')
holder = placeholder(len(text_list)-1,
current_page
)
holder_list.append(holder)
infile.close()
if in_reply == True:
print('格式错误:最后一个回复文本的前后包括标志不配对。')
# Page-number markers (auxiliary format)
if label == 1:
for holder in holder_list[1:]:
if holder.show:
page_label = ('☆☆☆☆☆'
' 进入第%d页 '
'☆☆☆☆☆\n'
'----------------'
'----------------'
'\n\n') % holder.pagenum
text_list[holder.posi] = page_label
color_p1 = color.fore_color(allcount, color.Fore.YELLOW)
color_p2 = color.fore_color(pickcount, color.Fore.YELLOW)
print('共有{0}条回复,选择了其中{1}条回复'.format(color_p1, color_p2))
# Contents of output ============
# Concatenate
if info_list:
s_iter = itertools.chain(info_list, '\n', text_list)
else:
s_iter = iter(text_list)
s = ''.join(s_iter)
# Several consecutive images
s = red.sub(r'(?:【一张图片(\d+|)】\s+){3,}',
r'【多张图片\1】\n\n',
s)
s = red.sub(r'(?:【一张图片(\d+|)】\s+){2}',
r'【两张图片\1】\n\n',
s)
# Output StringIO
output = StringIO(s)
# Chinese character count
chinese_ct = count_chinese(s)
# Discarded text
if keep_discard and abandon_list:
s_iter = itertools.chain(info_list, '\n', abandon_list)
s = ''.join(s_iter)
discard = StringIO(s)
else:
discard = None
return output, discard, info_list, chinese_ct
def internal_to_bp(tz):
'''
Internal form to BP text.
Returns (output StringIO, title).
'''
def page_to_g(page):
'''One page; returns: text, number of extracted replies, total number of replies'''
rpls = [reply_to_bp(r, True) for r in page.replys]
pickcount = len(rpls)
allcount = len(page.replys)
if not pickcount:
return '', 0, allcount
else:
# Header info
head = ('<page>页号: ', str(page.page_num), '\n',
'<page>网址: ', page.url, '\n',
'<page>有后页: ', str(page.finished), '\n',
'<page>总回复数: ', str(allcount),
' 摘取回复数: ', str(pickcount)
)
head = ''.join(head)
# Header info and body text
s_iter = itertools.chain((head,), rpls, ('',))
s = '\n\n'.join(s_iter)
return s, pickcount, allcount
def tiezi_to_g(tiezi):
pgs = [page_to_g(p) for p in tiezi.pages]
text = (x for x,y,z in pgs if y > 0)
pickcount = sum(y for x,y,z in pgs)
allcount = sum(z for x,y,z in pgs)
color_p1 = color.fore_color(allcount, color.Fore.YELLOW)
color_p2 = color.fore_color(pickcount, color.Fore.YELLOW)
print('总回复数: {0} 摘取回复数: {1}'.format(color_p1, color_p2))
if not pickcount:
return None
else:
# Header info
firstpg = tiezi.pages[0]
lastpg = tiezi.pages[-1]
processor_name = '<processor: ' + tiezi.local_processor + '>\n' \
if tiezi.local_processor \
else ''
fmark = '(未下载到末页)' if lastpg.finished else '(已下载到末页)'
post_time = '<tiezi>发帖时间:' + \
firstpg.replys[0].time.strftime('%Y-%m-%d %H:%M') + \
'\n' \
if firstpg.page_num == 1 and firstpg.replys \
else ''
head = (processor_name,
'<tiezi>标题:', tiezi.title, '\n',
'<tiezi>楼主:', tiezi.louzhu, '\n',
post_time,
'<tiezi>下载时间:',datetime.now().strftime('%Y-%m-%d %H:%M'),'\n',
'<tiezi>起始网址:', tiezi.begin_url, '\n',
'起始页号', str(firstpg.page_num),
',末尾页号', str(lastpg.page_num), ' ', fmark, '\n',
'总回复数: ', str(allcount),
' 摘取回复数: ', str(pickcount), '\n\n'
)
s_iter = itertools.chain(head, text)
s = ''.join(s_iter)
return s
#----------------------------------
# internal_to_bp(tz) starts here
#----------------------------------
if not tz or not tz.pages:
print('一页也没有,不输出编排文件')
return None, ''
text = tiezi_to_g(tz)
if text == None:
print('\n没有摘取到回复,不输出文件')
return None, ''
# StringIO object
output = StringIO(text)
return output, tz.title
def web_to_internal(url, pg_count):
'''Forum thread to internal form'''
# Downloader
f = Fetcher()
# Page parser
parser = AbPageParser.get_parser(url)
if not parser:
return None
tz = Tiezi()
dl_count = 0
while True:
# Have we downloaded the requested number of pages?
if pg_count >= 0 and dl_count >= pg_count:
print('下载完指定页数{0},停止下载\n'.format(pg_count))
break
# Download the data
url = parser.pre_process_url(url)
data = f.fetch_url(url)
if not data:
print('无法读取页面:{0}'.format(url))
break
# Prepare the parser
if dl_count == 0:
# Check the parser
parser.set_page(url, data)
if not parser.check_parse_methods():
print(' 可能是网页改版,导致无法提取数据。')
print(' 请使用“检测新版本”功能检测是否有新程序可用。')
print()
return None
# First downloaded page
tz.begin_url = url
else:
# 送数据到解析器
parser.set_page(url, data)
# Set the thread (tz) info
if not tz.louzhu:
pub_date = None
tz.title = parser.wrap_get_title()
tz.louzhu = parser.wrap_get_louzhu()
# Use the first reply on page 1 for the thread starter and the post date
if parser.wrap_get_page_num() == 1:
rplys = parser.wrap_get_replys()
if rplys:
if not tz.louzhu:
tz.louzhu = rplys[0].author
pub_date = rplys[0].time.strftime('%Y-%m-%d %H:%M')
# Manually enter the thread starter's ID
if not tz.louzhu:
tz.louzhu = input('无法提取楼主ID,请手工输入楼主ID:').strip()
if not tz.louzhu:
print('无法得到楼主ID')
break
# Print the thread info
print_str = '标题:%s\n楼主:%s\n' % (tz.title, tz.louzhu)
if pub_date != None:
print_str += '发帖时间:%s\n' % pub_date
save_print(print_str)
# Get the local processor name
tz.local_processor = parser.get_local_processor()
next_url = parser.wrap_get_next_pg_url()
pg_num = parser.wrap_get_page_num()
replys = parser.wrap_get_replys()
# Total replies on this page
page_reply_count = len(replys)
# Keep only the thread starter's replies
replys = [r for r in replys if r.author == tz.louzhu]
print('已下载第%d页, 有%d/%d条回复' %
(pg_num, len(replys), page_reply_count)
)
# Add the page
pg = Page(url,
pg_num,
bool(next_url),
replys
)
tz.add_page(pg)
dl_count += 1
# Last page of the thread?
if not next_url:
print('\n下载完帖子的最后一页(第{0}页),停止'.format(pg.page_num))
break
url = next_url
count = sum(len(p.replys) for p in tz.pages)
color_p1 = color.fore_color(len(tz.pages), color.Fore.YELLOW)
info = '共载入{pg_count}页,共有回复{rpl_count}条'.format(
pg_count=color_p1,
rpl_count=count
)
print(info)
# Play a beep
if winsound != None:
try:
winsound.Beep(400, 320) # (frequency, duration)
except:
pass
# Escape tags in the BP text
def escape_bp_tag(text):
# Escape BP tags
text = red.sub(r'^(<(?:time|mark)>)',
r'#\1',
text,
flags=red.MULTILINE)
# 【引用开始】/【引用结束】 (quote begin/end markers)
text = red.sub(r'【(引用(?:开始|结束)|补充回复)】',
r'[\1]',
text)
# Processing notes appended to marked replies
if text.endswith('【与上一条回复重复】') \
or text.endswith('【无法处理的回复】'):
text = text + '#'
return text
for p in tz.pages:
for r in p.replys:
r.text = escape_bp_tag(r.text)
return tz
|
bsd-3-clause
| 8,837,727,428,784,550,000
| 26.571429
| 82
| 0.433997
| false
| 2.86795
| false
| false
| false
|
partofthething/home-assistant
|
homeassistant/components/netatmo/data_handler.py
|
1
|
6057
|
"""The Netatmo data handler."""
from collections import deque
from datetime import timedelta
from functools import partial
from itertools import islice
import logging
from time import time
from typing import Deque, Dict, List
import pyatmo
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_track_time_interval
from .const import AUTH, DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
CAMERA_DATA_CLASS_NAME = "CameraData"
WEATHERSTATION_DATA_CLASS_NAME = "WeatherStationData"
HOMECOACH_DATA_CLASS_NAME = "HomeCoachData"
HOMEDATA_DATA_CLASS_NAME = "HomeData"
HOMESTATUS_DATA_CLASS_NAME = "HomeStatus"
PUBLICDATA_DATA_CLASS_NAME = "PublicData"
NEXT_SCAN = "next_scan"
DATA_CLASSES = {
WEATHERSTATION_DATA_CLASS_NAME: pyatmo.WeatherStationData,
HOMECOACH_DATA_CLASS_NAME: pyatmo.HomeCoachData,
CAMERA_DATA_CLASS_NAME: pyatmo.CameraData,
HOMEDATA_DATA_CLASS_NAME: pyatmo.HomeData,
HOMESTATUS_DATA_CLASS_NAME: pyatmo.HomeStatus,
PUBLICDATA_DATA_CLASS_NAME: pyatmo.PublicData,
}
BATCH_SIZE = 3
DEFAULT_INTERVALS = {
HOMEDATA_DATA_CLASS_NAME: 900,
HOMESTATUS_DATA_CLASS_NAME: 300,
CAMERA_DATA_CLASS_NAME: 900,
WEATHERSTATION_DATA_CLASS_NAME: 600,
HOMECOACH_DATA_CLASS_NAME: 300,
PUBLICDATA_DATA_CLASS_NAME: 600,
}
SCAN_INTERVAL = 60
class NetatmoDataHandler:
"""Manages the Netatmo data handling."""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Initialize self."""
self.hass = hass
self._auth = hass.data[DOMAIN][entry.entry_id][AUTH]
self.listeners: List[CALLBACK_TYPE] = []
self._data_classes: Dict = {}
self.data = {}
self._queue: Deque = deque()
self._webhook: bool = False
async def async_setup(self):
"""Set up the Netatmo data handler."""
async_track_time_interval(
self.hass, self.async_update, timedelta(seconds=SCAN_INTERVAL)
)
self.listeners.append(
async_dispatcher_connect(
self.hass,
f"signal-{DOMAIN}-webhook-None",
self.handle_event,
)
)
async def async_update(self, event_time):
"""
Update device.
We do up to BATCH_SIZE calls in one update in order
to minimize the calls on the api service.
"""
for data_class in islice(self._queue, 0, BATCH_SIZE):
if data_class[NEXT_SCAN] > time():
continue
self._data_classes[data_class["name"]][NEXT_SCAN] = (
time() + data_class["interval"]
)
await self.async_fetch_data(
data_class["class"], data_class["name"], **data_class["kwargs"]
)
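# Rotate so later updates poll a different slice of the registered data
# classes (a simple round-robin in steps of BATCH_SIZE).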
self._queue.rotate(BATCH_SIZE)
async def async_cleanup(self):
"""Clean up the Netatmo data handler."""
for listener in self.listeners:
listener()
async def handle_event(self, event):
"""Handle webhook events."""
if event["data"]["push_type"] == "webhook_activation":
_LOGGER.info("%s webhook successfully registered", MANUFACTURER)
self._webhook = True
elif event["data"]["push_type"] == "webhook_deactivation":
_LOGGER.info("%s webhook unregistered", MANUFACTURER)
self._webhook = False
elif event["data"]["push_type"] == "NACamera-connection":
_LOGGER.debug("%s camera reconnected", MANUFACTURER)
self._data_classes[CAMERA_DATA_CLASS_NAME][NEXT_SCAN] = time()
async def async_fetch_data(self, data_class, data_class_entry, **kwargs):
"""Fetch data and notify."""
try:
self.data[data_class_entry] = await self.hass.async_add_executor_job(
partial(data_class, **kwargs),
self._auth,
)
for update_callback in self._data_classes[data_class_entry][
"subscriptions"
]:
if update_callback:
update_callback()
except pyatmo.NoDevice as err:
_LOGGER.debug(err)
self.data[data_class_entry] = None
except pyatmo.ApiError as err:
_LOGGER.debug(err)
async def register_data_class(
self, data_class_name, data_class_entry, update_callback, **kwargs
):
"""Register data class."""
if data_class_entry in self._data_classes:
self._data_classes[data_class_entry]["subscriptions"].append(
update_callback
)
return
self._data_classes[data_class_entry] = {
"class": DATA_CLASSES[data_class_name],
"name": data_class_entry,
"interval": DEFAULT_INTERVALS[data_class_name],
NEXT_SCAN: time() + DEFAULT_INTERVALS[data_class_name],
"kwargs": kwargs,
"subscriptions": [update_callback],
}
await self.async_fetch_data(
DATA_CLASSES[data_class_name], data_class_entry, **kwargs
)
self._queue.append(self._data_classes[data_class_entry])
_LOGGER.debug("Data class %s added", data_class_entry)
async def unregister_data_class(self, data_class_entry, update_callback):
"""Unregister data class."""
if update_callback not in self._data_classes[data_class_entry]["subscriptions"]:
return
self._data_classes[data_class_entry]["subscriptions"].remove(update_callback)
if not self._data_classes[data_class_entry].get("subscriptions"):
self._queue.remove(self._data_classes[data_class_entry])
self._data_classes.pop(data_class_entry)
_LOGGER.debug("Data class %s removed", data_class_entry)
@property
def webhook(self) -> bool:
"""Return the webhook state."""
return self._webhook
|
mit
| -5,802,619,240,173,583,000
| 32.65
| 88
| 0.616312
| false
| 3.80704
| false
| false
| false
|
hrpt-se/hrpt
|
urls.py
|
1
|
2104
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import RedirectView, TemplateView
from django.views.static import serve
from contact_form.views import ContactFormView
from apps.partnersites.views import colors_css
from apps.pollster.views import map_tile, map_click, chart_data
from apps.hrptinfo.forms import CaptchaContactForm
admin.autodiscover()
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^admin/manual-newsletters/', include('apps.reminder.nladminurls')),
url(r'^admin/surveys-editor/', include('apps.pollster.urls')),
url(r'^surveys/(?P<survey_shortname>.+)/charts/(?P<chart_shortname>.+)/tile/(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+)$',
map_tile, name='pollster_map_tile'),
url(r'^surveys/(?P<survey_shortname>.+)/charts/(?P<chart_shortname>.+)/click/(?P<lat>[\d.-]+)/(?P<lng>[\d.-]+)$',
map_click, name='pollster_map_click'),
url(r'^surveys/(?P<survey_shortname>.+)/charts/(?P<chart_shortname>.+)\.json$', chart_data,
name='pollster_chart_data'),
url(r'^survey/', include('apps.survey.urls')),
url(r'^reminder/', include('apps.reminder.urls')),
url(r'^registrera/$', RedirectView.as_view(url='/accounts/register')),
url(r'^accounts/', include('apps.accounts.urls')),
url(r'^login/', include('loginurl.urls')),
url(r'^count/', include('apps.count.urls')),
url(r'^contact/$', ContactFormView.as_view(form_class=CaptchaContactForm), name='contact_form'),
url(r'^contact/sent/$', TemplateView.as_view(template_name='contact_form/contact_form_sent.html'),
name='contact_form_sent'),
url(r'^colors.css$', colors_css)
]
# Catchall
urlpatterns += i18n_patterns(
url(r'^', include('cms.urls'))
)
if settings.DEBUG:
urlpatterns = [
url(r'^upload/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT})
] + staticfiles_urlpatterns() + urlpatterns
|
agpl-3.0
| 1,752,537,007,993,838,300
| 43.765957
| 117
| 0.673954
| false
| 3.302983
| false
| true
| false
|
lantianlz/zx
|
scripts/crontab/worker_delay_monitor.py
|
1
|
1433
|
# -*- coding: utf-8 -*-
"""
@note: Monitor the worker task backlog and send a warning email when a queue exceeds its configured limit
"""
import sys
import os
# Add the parent directories to sys.path so other modules can be imported
SITE_ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.extend([os.path.abspath(os.path.join(SITE_ROOT, '../')),
os.path.abspath(os.path.join(SITE_ROOT, '../../')),
])
os.environ['DJANGO_SETTINGS_MODULE'] = 'www.settings'
from common import cache
from common.utils import send_email
from django.conf import settings
WORKER_CONFIG = [
{
'name': 'email_worker',
'limit': 200,
},
]
def get_delay_count(key):
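# Backlog size: length of the pending-task list for the given worker queue key.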
cache_obj = cache.Cache(cache.CACHE_WORKER)
return cache_obj.llen(key)
def main():
warn_list = []
for item in WORKER_CONFIG:
count = get_delay_count(item['name'])
print u'---%s----%s----' % (item['name'], count)
if count > item.get('limit'):
item['count'] = count
warn_list.append(item)
if warn_list:
title = u'%s主机 worker积压警告' % (settings.SERVER_NAME, )
content = u''
for item in warn_list:
content += u'%(name)s:积压任务数%(count)s, 警戒值为%(limit)s\n' % item
send_email(emails=settings.NOTIFICATION_EMAIL, title=title, content=content, type="text")
print 'ok'
if __name__ == '__main__':
main()
|
gpl-2.0
| 7,332,729,005,844,852,000
| 22.490909
| 97
| 0.551596
| false
| 2.859873
| false
| false
| false
|
decebel/dataAtom_alpha
|
bin/plug/py/external/pattern/text/de/parser/__init__.py
|
1
|
8935
|
#### PATTERN | DE | RULE-BASED SHALLOW PARSER ######################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Gerold Schneider, Martin Volk and University of Antwerp, Belgium
# Authors: Gerold Schneider & Martin Volk (German language model), Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import re
import os
try:
MODULE = os.path.dirname(__file__)
except:
MODULE = ""
# The tokenizer, chunker and relation finder are inherited from pattern.en.parser.
# The tagger is based on Schneider & Volk's German language model:
# Schneider, G., Volk, M. (1998). Adding Manual Constraints and Lexical Look-up to a Brill-Tagger for German.
# In: Proceedings of the ESSLLI workshop on recent advances in corpus annotation. Saarbrucken, Germany.
# http://www.zora.uzh.ch/28579/
# Accuracy is reported around 96%, but Pattern scores may vary from Schneider & Volk's original
# due to STTS => Penn Treebank mapping etc.
import sys; sys.path.insert(0, os.path.join(MODULE, "..", ".."))
from en.parser import Lexicon
from en.parser import PUNCTUATION, tokenize as _en_tokenize, parse as _en_parse, TaggedString
from en.parser import commandline
#### TOKENIZER #####################################################################################
ABBREVIATIONS = [
"Abs.", "Abt.", "Ass.", "Br.", "Ch.", "Chr.", "Cie.", "Co.", "Dept.", "Diff.",
"Dr.", "Eidg.", "Exp.", "Fam.", "Fr.", "Hrsg.", "Inc.", "Inv.", "Jh.", "Jt.", "Kt.",
"Mio.", "Mrd.", "Mt.", "Mte.", "Nr.", "Nrn.", "Ord.", "Ph.", "Phil.", "Pkt.",
"Prof.", "Pt.", " S.", "St.", "Stv.", "Tit.", "VII.", "al.", "begr.","bzw.",
"chem.", "dent.", "dipl.", "e.g.", "ehem.", "etc.", "excl.", "exkl.", "hum.",
"i.e.", "incl.", "ing.", "inkl.", "int.", "iur.", "lic.", "med.", "no.", "oec.",
"phil.", "phys.", "pp.", "psych.", "publ.", "rer.", "sc.", "soz.", "spez.", "stud.",
"theol.", "usw.", "vet.", "vgl.", "vol.", "wiss.",
"d.h.", "h.c.", u"o.ä.", "u.a.", "z.B.", "z.T.", "z.Zt."
]
def tokenize(s, punctuation=PUNCTUATION, abbreviations=ABBREVIATIONS, replace={}):
return _en_tokenize(s, punctuation, abbreviations, replace)
_tokenize = tokenize
#### LEMMATIZER ####################################################################################
# Word lemmas using singularization and verb conjugation from the inflect module.
try:
from ..inflect import singularize, conjugate, predicative
except:
try:
sys.path.append(os.path.join(MODULE, ".."))
from inflect import singularize, conjugate, predicative
except:
try:
from pattern.de.inflect import singularize, conjugate, predicative
except:
singularize = lambda w: w
conjugate = lambda w, t: w
predicative = lambda w: w
def lemma(word, pos="NN"):
if pos == "NNS":
return singularize(word)
if pos.startswith(("VB","MD")):
return conjugate(word, "infinitive") or word
if pos.startswith(("DT", "JJ")):
return predicative(word)
return word
def find_lemmata(tagged):
for token in tagged:
token.append(lemma(token[0].lower(), pos=len(token) > 1 and token[1] or None))
return tagged
#### PARSER ########################################################################################
# pattern.en.find_tags() has an optional "lexicon" parameter.
# We'll pass the German lexicon to it instead of the default English lexicon:
lexicon = LEXICON = Lexicon()
lexicon.path = os.path.join(MODULE, "brill-lexicon.txt")
lexicon.lexical_rules.path = os.path.join(MODULE, "brill-lexical.txt")
lexicon.contextual_rules.path = os.path.join(MODULE, "brill-contextual.txt")
lexicon.named_entities.tag = "NE"
# Stuttgart/Tubinger Tagset (STTS):
# https://files.ifi.uzh.ch/cl/tagger/UIS-STTS-Diffs.html
PENN = PENNTREEBANK = TREEBANK = "penntreebank"
STTS = "stts"
stts = {
"ADJA": "JJ", # das große Haus
"ADJD": "JJ", # er ist schnell
"ADV": "RB", # schon
"APPR": "IN", # in der Stadt
"APPRART": "IN", # im Haus
"APPO": "IN", # der Sache wegen
"APZR": "IN", # von jetzt an
"ART": "DT", # der, die, eine
"ARTDEF": "DT", # der, die
"ARTIND": "DT", # eine
"CARD": "CD", # zwei
"CARDNUM": "CD", # 3
"KOUI": "IN", # [um] zu leben
"KOUS": "IN", # weil, damit, ob
"KON": "CC", # und, oder, aber
"KOKOM": "IN", # als, wie
"KONS": "IN", # usw.
"NN": "NN", # Tisch, Herr
"NNS": "NNS", # Tischen, Herren
"NE": "NNP", # Hans, Hamburg
"PDS": "DT", # dieser, jener
"PDAT": "DT", # jener Mensch
"PIS": "DT", # keiner, viele, niemand
"PIAT": "DT", # kein Mensch
"PIDAT": "DT", # die beiden Brüder
"PPER": "PRP", # ich, er, ihm, mich, dir
"PPOS": "PRP$", # meins, deiner
"PPOSAT": "PRP$", # mein Buch, deine Mutter
"PRELS": "WDT", # der Hund, [der] bellt
"PRELAT": "WDT", # der Mann, [dessen] Hund bellt
"PRF": "PRP", # erinnere [dich]
"PWS": "WP", # wer
"PWAT": "WP", # wessen, welche
"PWAV": "WRB", # warum, wo, wann
"PAV": "RB", # dafur, dabei, deswegen, trotzdem
"PTKZU": "TO", # zu gehen, zu sein
"PTKNEG": "RB", # nicht
"PTKVZ": "RP", # pass [auf]!
"PTKANT": "UH", # ja, nein, danke, bitte
"PTKA": "RB", # am schönsten, zu schnell
"VVFIN": "VB", # du [gehst], wir [kommen] an
"VAFIN": "VB", # du [bist], wir [werden]
"VVINF": "VB", # gehen, ankommen
"VAINF": "VB", # werden, sein
"VVIZU": "VB", # anzukommen
"VVIMP": "VB", # [komm]!
"VAIMP": "VB", # [sei] ruhig!
"VVPP": "VBN", # gegangen, angekommen
"VAPP": "VBN", # gewesen
"VMFIN": "MD", # dürfen
"VMINF": "MD", # wollen
"VMPP": "MD", # gekonnt
"SGML": "SYM", #
"FM": "FW", #
"ITJ": "UH", # ach, tja
"XY": "NN", #
"XX": "NN", #
"LINUM": "LS", # 1.
"C": ",", # ,
"Co": ":", # :
"Ex": ".", # !
"Pc": ")", # )
"Po": "(", # (
"Q": ".", # ?
"QMc": "\"", # "
"QMo": "\"", # "
"S": ".", # .
"Se": ":", # ;
}
def stts2penntreebank(tag):
""" Converts an STTS tag to Penn Treebank II tag.
For example: ohne APPR => ohne/IN
"""
return stts.get(tag, tag)
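# A small illustrative sketch (not part of the original module) of how the
# mapping above behaves; the tag names are taken from the `stts` table defined
# above, and unknown tags fall through unchanged via stts.get(tag, tag).
#
# stts2penntreebank("ADJA")  # -> "JJ"   (das große Haus)
# stts2penntreebank("NE")    # -> "NNP"  (Hans, Hamburg)
# stts2penntreebank("FOO")   # -> "FOO"  (unmapped tags pass through)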
def parse(s, tokenize=True, tags=True, chunks=True, relations=False, lemmata=False, encoding="utf-8", **kwargs):
""" Takes a string (sentences) and returns a tagged Unicode string.
Sentences in the output are separated by newlines.
"""
if tokenize:
s = _tokenize(s)
if isinstance(s, (list, tuple)):
s = [isinstance(s, basestring) and s.split(" ") or s for s in s]
if isinstance(s, basestring):
s = [s.split(" ") for s in s.split("\n")]
# Reuse the English parser:
kwargs.update({
"lemmata": False,
"light": False,
"lexicon": LEXICON,
"language": "de",
"default": "NN",
"map": kwargs.get("tagset", "") != STTS and stts2penntreebank or None,
})
# The German lexicon uses "ss" instead of "ß".
# Instead of simply replacing it, we keep a hash map of the normalized words.
# After parsing we restore the "ß" so the output stays identical to the input.
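# (Illustration, not in the original source: u"Straße" is looked up in the
# lexicon as "Strasse" and restored to u"Straße" in the parser output.)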
m = dict((token.replace(u"ß", "ss"), token) for sentence in s for token in sentence)
s = [[token.replace(u"ß", "ss") for token in sentence] for sentence in s]
s = _en_parse(s, False, tags, chunks, relations, **kwargs)
p = [[[m[token[0]]] + token[1:] for token in sentence] for sentence in s.split()]
p = "\n".join([" ".join(["/".join(token) for token in sentence]) for sentence in p])
s = TaggedString(p, tags=s.tags, language="de")
# Use pattern.de.inflect for lemmatization:
if lemmata:
p = [find_lemmata(sentence) for sentence in s.split()]
p = "\n".join([" ".join(["/".join(token) for token in sentence]) for sentence in p])
s = TaggedString(p, tags=s.tags+["lemma"], language="de")
return s
def tag(s, tokenize=True, encoding="utf-8"):
""" Returns a list of (token, tag)-tuples from the given string.
"""
tags = []
for sentence in parse(s, tokenize, True, False, False, False, encoding).split():
for token in sentence:
tags.append((token[0], token[1]))
return tags
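# A hypothetical usage sketch (not from the original source), assuming the
# Brill lexicon files referenced above are available next to this module:
#
# print parse(u"Die Katzen liegen auf dem Dach.", lemmata=True)
# print tag(u"Die Katzen liegen auf dem Dach.")   # [(token, Penn tag), ...]
#
# Passing tagset=STTS to parse() keeps the original STTS tags instead of
# applying the Penn Treebank mapping above.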
#### COMMAND LINE ##################################################################################
# From the folder that contains the "pattern" folder:
# python -m pattern.de.parser xml -s "Ein Unglück kommt selten allein." -OTCLI
if __name__ == "__main__":
commandline(parse)
|
apache-2.0
| 4,828,502,076,624,707,000
| 39.572727
| 112
| 0.531541
| false
| 2.922397
| false
| false
| false
|
guoci/python3-xlib-trunk
|
Xlib/protocol/rq.py
|
1
|
46900
|
# Xlib.protocol.rq -- structure primitives for request, events and errors
#
# Copyright (C) 2000-2002 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Standard modules
import sys
import traceback
import struct
from array import array
import types
# Xlib modules
from Xlib import X
from Xlib.support import lock
_PY3 = sys.version[0] >= '3'
# in Python 3, bytes are an actual array; in python 2, bytes are still
# string-like, so in order to get an array element we need to call ord()
if _PY3:
def _bytes_item(x):
return x
else:
def _bytes_item(x):
return ord(x)
class BadDataError(Exception): pass
# These are struct codes, we know their byte sizes
signed_codes = { 1: 'b', 2: 'h', 4: 'l' }
unsigned_codes = { 1: 'B', 2: 'H', 4: 'L' }
# Unfortunately, we don't know the array sizes of B, H and L, since
# these use the underlying architecture's size for a char, short and
# long. Therefore we probe for their sizes, and additionally create
# a mapping that translates from struct codes to array codes.
#
# Bleah.
array_unsigned_codes = { }
struct_to_array_codes = { }
for c in 'bhil':
size = array(c).itemsize
array_unsigned_codes[size] = c.upper()
try:
struct_to_array_codes[signed_codes[size]] = c
struct_to_array_codes[unsigned_codes[size]] = c.upper()
except KeyError:
pass
# print array_unsigned_codes, struct_to_array_codes
class Field:
"""Field objects represent the data fields of a Struct.
Field objects must have the following attributes:
name -- the field name, or None
structcode -- the struct codes representing this field
structvalues -- the number of values encodes by structcode
Additionally, these attributes should either be None or real methods:
check_value -- check a value before it is converted to binary
parse_value -- parse a value after it has been converted from binary
If one of these attributes is None, no check or additional
parsing will be done on values when converting to or from binary
form. Otherwise, the methods should have the following behaviour:
newval = check_value(val)
Check that VAL is legal when converting to binary form. The
value can also be converted to another Python value. In any
case, return the possibly new value. NEWVAL should be a
single Python value if structvalues is 1, a tuple of
structvalues elements otherwise.
newval = parse_value(val, display)
VAL is an unpacked Python value, which now can be further
refined. DISPLAY is the current Display object. Return the
new value. VAL will be a single value if structvalues is 1,
a tuple of structvalues elements otherwise.
If `structcode' is None the Field must have the method
f.parse_binary_value() instead. See its documentation string for
details.
"""
name = None
default = None
structcode = None
structvalues = 0
check_value = None
parse_value = None
keyword_args = 0
def __init__(self):
pass
def parse_binary_value(self, data, display, length, format):
"""value, remaindata = f.parse_binary_value(data, display, length, format)
Decode a value for this field from the binary string DATA.
If there are a LengthField and/or a FormatField connected to this
field, their values will be LENGTH and FORMAT, respectively. If
there are no such fields the parameters will be None.
DISPLAY is the display involved, which is really only used by
the Resource fields.
The decoded value is returned as VALUE, and the remaining part
of DATA should be returned as REMAINDATA.
"""
raise RuntimeError('Neither structcode nor parse_binary_value provided for %s'
% self)
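# A hypothetical sketch (not part of the original module) of the
# check_value/parse_value protocol described in the Field docstring above:
# a single-value field, modelled on the ValueField subclasses further down,
# that is transmitted on the wire as tenths of the Python-level value.
#
# class Tenths(ValueField):
#     structcode = 'H'
#     structvalues = 1
#     def check_value(self, value):           # applied before packing
#         return int(value * 10)
#     def parse_value(self, value, display):  # applied after unpacking
#         return value / 10.0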
class Pad(Field):
def __init__(self, size):
self.size = size
self.value = b'\0' * size
self.structcode = '%dx' % size
self.structvalues = 0
class ConstantField(Field):
def __init__(self, value):
self.value = value
class Opcode(ConstantField):
structcode = 'B'
structvalues = 1
class ReplyCode(ConstantField):
structcode = 'B'
structvalues = 1
def __init__(self):
self.value = 1
class LengthField(Field):
"""A LengthField stores the length of some other Field whose size
may vary, e.g. List and String8.
Its name should be the same as the name of the field whose size
it stores. The other_fields attribute can be used to specify the
names of other fields whose sizes are stored by this field, so
a single length field can set the length of multiple fields.
The lf.get_binary_value() method of LengthFields is not used, instead
a lf.get_binary_length() should be provided.
Unless LengthField.get_binary_length() is overridden in child classes,
there should also be a lf.calc_length().
"""
structcode = 'L'
structvalues = 1
other_fields = None
def calc_length(self, length):
"""newlen = lf.calc_length(length)
Return a new length NEWLEN based on the provided LENGTH.
"""
return length
class TotalLengthField(LengthField):
pass
class RequestLength(TotalLengthField):
structcode = 'H'
structvalues = 1
def calc_length(self, length):
return length // 4
class ReplyLength(TotalLengthField):
structcode = 'L'
structvalues = 1
def calc_length(self, length):
return (length - 32) // 4
class LengthOf(LengthField):
def __init__(self, name, size):
if isinstance(name, (list, tuple)):
self.name = name[0]
self.other_fields = name[1:]
else:
self.name = name
self.structcode = unsigned_codes[size]
class OddLength(LengthField):
structcode = 'B'
structvalues = 1
def __init__(self, name):
self.name = name
def calc_length(self, length):
return length % 2
def parse_value(self, value, display):
if value == 0:
return 'even'
else:
return 'odd'
class FormatField(Field):
"""A FormatField encodes the format of some other field, in a manner
similar to LengthFields.
The ff.get_binary_value() method is not used, replaced by
ff.get_binary_format().
"""
structvalues = 1
def __init__(self, name, size):
self.name = name
self.structcode = unsigned_codes[size]
Format = FormatField
class ValueField(Field):
def __init__(self, name, default = None):
self.name = name
self.default = default
class Int8(ValueField):
structcode = 'b'
structvalues = 1
class Int16(ValueField):
structcode = 'h'
structvalues = 1
class Int32(ValueField):
structcode = 'l'
structvalues = 1
class Card8(ValueField):
structcode = 'B'
structvalues = 1
class Card16(ValueField):
structcode = 'H'
structvalues = 1
class Card32(ValueField):
structcode = 'L'
structvalues = 1
class Resource(Card32):
cast_function = '__resource__'
class_name = 'resource'
def __init__(self, name, codes = (), default = None):
Card32.__init__(self, name, default)
self.codes = codes
def check_value(self, value):
try:
return getattr(value, self.cast_function)()
except AttributeError:
return value
def parse_value(self, value, display):
# if not display:
# return value
if value in self.codes:
return value
c = display.get_resource_class(self.class_name)
if c:
return c(display, value)
else:
return value
class Window(Resource):
cast_function = '__window__'
class_name = 'window'
class Pixmap(Resource):
cast_function = '__pixmap__'
class_name = 'pixmap'
class Drawable(Resource):
cast_function = '__drawable__'
class_name = 'drawable'
class Fontable(Resource):
cast_function = '__fontable__'
class_name = 'fontable'
class Font(Resource):
cast_function = '__font__'
class_name = 'font'
class GC(Resource):
cast_function = '__gc__'
class_name = 'gc'
class Colormap(Resource):
cast_function = '__colormap__'
class_name = 'colormap'
class Cursor(Resource):
cast_function = '__cursor__'
class_name = 'cursor'
class Bool(ValueField):
structvalues = 1
structcode = 'B'
def check_value(self, value):
return not not value
class Set(ValueField):
structvalues = 1
def __init__(self, name, size, values, default = None):
ValueField.__init__(self, name, default)
self.structcode = unsigned_codes[size]
self.values = values
def check_value(self, val):
if val not in self.values:
raise ValueError('field %s: argument %s not in %s'
% (self.name, val, self.values))
return val
class Gravity(Set):
def __init__(self, name):
Set.__init__(self, name, 1, (X.ForgetGravity, X.StaticGravity,
X.NorthWestGravity, X.NorthGravity,
X.NorthEastGravity, X.WestGravity,
X.CenterGravity, X.EastGravity,
X.SouthWestGravity, X.SouthGravity,
X.SouthEastGravity))
class FixedString(ValueField):
structvalues = 1
def __init__(self, name, size):
ValueField.__init__(self, name)
self.structcode = '%ds' % size
class String8(ValueField):
structcode = None
def __init__(self, name, pad = 1):
ValueField.__init__(self, name)
self.pad = pad
def pack_value(self, val):
slen = len(val)
val = val.encode('UTF-8')
# if _PY3 and type(val) is str:
# val = val.encode('UTF-8')
if self.pad:
return val + b'\0' * ((4 - slen % 4) % 4), slen, None
else:
return val, slen, None
def parse_binary_value(self, data, display, length, format):
if length is None:
try:
return data.decode('UTF-8'), b''
except UnicodeDecodeError:
return data, b''
if self.pad:
slen = length + ((4 - length % 4) % 4)
else:
slen = length
s = data[:length]
try:
s = s.decode('UTF-8')
except UnicodeDecodeError:
pass # return as bytes
return s, data[slen:]
class String16(ValueField):
structcode = None
def __init__(self, name, pad = 1):
ValueField.__init__(self, name)
self.pad = pad
def pack_value(self, val):
# Convert 8-byte string into 16-byte list
if type(val) is str:
val = [ord(c) for c in val]
slen = len(val)
if self.pad:
pad = b'\0\0' * (slen % 2)
else:
pad = b''
return (struct.pack(*('>' + 'H' * slen, ) + tuple(val)) + pad,
slen, None)
def parse_binary_value(self, data, display, length, format):
if length == 'odd':
length = len(data) // 2 - 1
elif length == 'even':
length = len(data) // 2
if self.pad:
slen = length + (length % 2)
else:
slen = length
return (struct.unpack('>' + 'H' * length, data[:length * 2]),
data[slen * 2:])
class List(ValueField):
"""The List, FixedList and Object fields store compound data objects.
The type of data objects must be provided as an object with the
following attributes and methods:
...
"""
structcode = None
def __init__(self, name, type, pad = 1):
ValueField.__init__(self, name)
self.type = type
self.pad = pad
def parse_binary_value(self, data, display, length, format):
if length is None:
ret = []
if self.type.structcode is None:
while data:
val, data = self.type.parse_binary(data, display)
ret.append(val)
else:
scode = '=' + self.type.structcode
slen = struct.calcsize(scode)
pos = 0
while pos + slen <= len(data):
v = struct.unpack(scode, data[pos: pos + slen])
if self.type.structvalues == 1:
v = v[0]
if self.type.parse_value is None:
ret.append(v)
else:
ret.append(self.type.parse_value(v, display))
pos = pos + slen
data = data[pos:]
else:
ret = [None] * int(length)
if self.type.structcode is None:
for i in range(0, length):
ret[i], data = self.type.parse_binary(data, display)
else:
scode = '=' + self.type.structcode
slen = struct.calcsize(scode)
pos = 0
for i in range(0, length):
v = struct.unpack(scode, data[pos: pos + slen])
if self.type.structvalues == 1:
v = v[0]
if self.type.parse_value is None:
ret[i] = v
else:
ret[i] = self.type.parse_value(v, display)
pos = pos + slen
data = data[pos:]
if self.pad:
data = data[len(data) % 4:]
return ret, data
def pack_value(self, val):
# Single-char values, we'll assume that means integer lists.
if self.type.structcode and len(self.type.structcode) == 1:
if self.type.check_value is not None:
val = [self.type.check_value(v) for v in val]
data = array(struct_to_array_codes[self.type.structcode],
val).tobytes()
else:
data = []
for v in val:
data.append(self.type.pack_value(v))
data = b''.join(data)
if self.pad:
dlen = len(data)
data = data + b'\0' * ((4 - dlen % 4) % 4)
return data, len(val), None
class FixedList(List):
def __init__(self, name, size, type, pad = 1):
List.__init__(self, name, type, pad)
self.size = size
def parse_binary_value(self, data, display, length, format):
return List.parse_binary_value(self, data, display, self.size, format)
def pack_value(self, val):
if len(val) != self.size:
raise BadDataError('length mismatch for FixedList %s' % self.name)
return List.pack_value(self, val)
class Object(ValueField):
def __init__(self, name, type, default = None):
ValueField.__init__(self, name, default)
self.type = type
self.structcode = self.type.structcode
self.structvalues = self.type.structvalues
def parse_binary_value(self, data, display, length, format):
return self.type.parse_binary(data, display)
def parse_value(self, val, display):
return self.type.parse_value(val, display)
def pack_value(self, val):
return self.type.pack_value(val)
def check_value(self, val):
if type(val) is tuple:
return val #TODO_PY3, reverted this to r135.
# code below added at r159
vals = []
i = 0
for f in self.type.fields:
if f.name:
if f.check_value is None:
v = val[i]
else:
v = f.check_value(val[i])
if f.structvalues == 1:
vals.append(v)
else:
vals.extend(v)
i = i + 1
return vals
if type(val) is dict:
data = val
elif isinstance(val, DictWrapper):
data = val._data
else:
raise TypeError('Object value must be tuple, dictionary or DictWrapper: %s' % val)
vals = []
for f in self.type.fields:
if f.name:
if f.check_value is None:
v = data[f.name]
else:
v = f.check_value(data[f.name])
if f.structvalues == 1:
vals.append(v)
else:
vals.extend(v)
return vals
class PropertyData(ValueField):
structcode = None
def parse_binary_value(self, data, display, length, format):
if length is None:
length = len(data) // (format // 8)
else:
length = int(length)
if format == 0:
ret = None
elif format == 8:
ret = (8, data[:length])
data = data[length + ((4 - length % 4) % 4):]
elif format == 16:
ret = (16, array(array_unsigned_codes[2], data[:2 * length]))
data = data[2 * (length + length % 2):]
elif format == 32:
ret = (32, array(array_unsigned_codes[4], data[:4 * length]))
data = data[4 * length:]
if ret != None and type(ret[1]) is bytes:
ret = (ret[0], ret[1].decode('UTF-8'))
return ret, data
def pack_value(self, value):
fmt, val = value
if fmt not in (8, 16, 32):
raise BadDataError('Invalid property data format %d' % fmt)
if type(val) is str:
val = val.encode('UTF-8')
if type(val) is bytes:
size = fmt // 8
vlen = len(val)
if vlen % size:
vlen = vlen - vlen % size
data = val[:vlen]
else:
data = val
dlen = vlen // size
else:
if type(val) is tuple:
val = list(val)
size = fmt // 8
data = array(array_unsigned_codes[size], val).tobytes()
dlen = len(val)
dl = len(data)
data = data + b'\0' * ((4 - dl % 4) % 4)
return data, dlen, fmt
class FixedPropertyData(PropertyData):
def __init__(self, name, size):
PropertyData.__init__(self, name)
self.size = size
def parse_binary_value(self, data, display, length, format):
return PropertyData.parse_binary_value(self, data, display,
self.size // (format // 8), format)
def pack_value(self, value):
data, dlen, fmt = PropertyData.pack_value(self, value)
if len(data) != self.size:
raise BadDataError('Wrong data length for FixedPropertyData: %s'
% (value, ))
return data, dlen, fmt
class ValueList(Field):
structcode = None
keyword_args = 1
default = 'usekeywords'
def __init__(self, name, mask, pad, *fields):
self.name = name
self.maskcode = '=%s%dx' % (unsigned_codes[mask], pad)
self.maskcodelen = struct.calcsize(self.maskcode)
self.fields = []
flag = 1
for f in fields:
if f.name:
self.fields.append((f, flag))
flag = flag << 1
def pack_value(self, arg, keys):
mask = 0
data = b''
if arg == self.default:
arg = keys
for field, flag in self.fields:
if field.name in arg:
mask = mask | flag
val = arg[field.name]
if field.check_value is not None:
val = field.check_value(val)
d = struct.pack('=' + field.structcode, val)
data = data + d + b'\0' * (4 - len(d))
return struct.pack(self.maskcode, mask) + data, None, None
def parse_binary_value(self, data, display, length, format):
r = {}
mask = int(struct.unpack(self.maskcode, data[:self.maskcodelen])[0])
data = data[self.maskcodelen:]
for field, flag in self.fields:
if mask & flag:
if field.structcode:
vals = struct.unpack('=' + field.structcode,
data[:struct.calcsize('=' + field.structcode)])
if field.structvalues == 1:
vals = vals[0]
if field.parse_value is not None:
vals = field.parse_value(vals, display)
else:
vals, d = field.parse_binary_value(data[:4], display, None, None)
r[field.name] = vals
data = data[4:]
return DictWrapper(r), data
class KeyboardMapping(ValueField):
structcode = None
def parse_binary_value(self, data, display, length, format):
if length is None:
dlen = len(data)
else:
dlen = 4 * length * format
a = array(array_unsigned_codes[4], data[:dlen])
ret = []
for i in range(0, len(a), format):
ret.append(a[i : i + format])
return ret, data[dlen:]
def pack_value(self, value):
keycodes = 0
for v in value:
keycodes = max(keycodes, len(v))
a = array(array_unsigned_codes[4])
for v in value:
for k in v:
a.append(k)
for i in range(len(v), keycodes):
a.append(X.NoSymbol)
return a.tobytes(), len(value), keycodes
class ModifierMapping(ValueField):
structcode = None
def parse_binary_value(self, data, display, length, format):
a = array(array_unsigned_codes[1], data[:8 * format])
ret = []
for i in range(0, 8):
ret.append(a[i * format : (i + 1) * format])
return ret, data[8 * format:]
def pack_value(self, value):
if len(value) != 8:
raise BadDataError('ModifierMapping list should have eight elements')
keycodes = 0
for v in value:
keycodes = max(keycodes, len(v))
a = array(array_unsigned_codes[1])
for v in value:
for k in v:
a.append(k)
for i in range(len(v), keycodes):
a.append(0)
return a.tobytes(), len(value), keycodes
class EventField(ValueField):
structcode = None
def pack_value(self, value):
if not isinstance(value, Event):
raise BadDataError('%s is not an Event for field %s' % (value, self.name))
return value._binary, None, None
def parse_binary_value(self, data, display, length, format):
assert type(data) is bytes
from . import event
estruct = display.event_classes.get(data[0] & 0x7f, event.AnyEvent)
if type(estruct) == dict:
# this etype refers to a set of sub-events with individual subcodes
# index with _bytes_item so this works whether data is a py2 str or py3 bytes
estruct = estruct[_bytes_item(data[1])]
return estruct(display = display, binarydata = data[:32]), data[32:]
#
# Objects usable for List and FixedList fields.
# Struct is also usable.
#
class ScalarObj:
def __init__(self, code):
self.structcode = code
self.structvalues = 1
self.parse_value = None
self.check_value = None
Card8Obj = ScalarObj('B')
Card16Obj = ScalarObj('H')
Card32Obj = ScalarObj('L')
class ResourceObj:
structcode = 'L'
structvalues = 1
def __init__(self, class_name):
self.class_name = class_name
self.check_value = None
def parse_value(self, value, display):
# if not display:
# return value
c = display.get_resource_class(self.class_name)
if c:
return c(display, value)
else:
return value
WindowObj = ResourceObj('window')
ColormapObj = ResourceObj('colormap')
class StrClass:
structcode = None
def pack_value(self, val):
if type(val) is not bytes:
val = val.encode('UTF-8')
if _PY3:
val = bytes([len(val)]) + val
else:
val = chr(len(val)) + val
return val
def parse_binary(self, data, display):
assert type(data) is bytes
slen = data[0] + 1
s = data[1:slen]
try:
s = s.decode('UTF-8')
except UnicodeDecodeError:
pass # return as bytes
return s, data[slen:]
Str = StrClass()
class Struct:
"""Struct objects represents a binary data structure. It can
contain both fields with static and dynamic sizes. However, all
static fields must appear before all dynamic fields.
Fields are represented by various subclasses of the abstract base
class Field. The fields of a structure are given as arguments
when instantiating a Struct object.
Struct objects have two public methods:
to_binary() -- build a binary representation of the structure
with the values given as arguments
parse_binary() -- convert a binary (string) representation into
a Python dictionary or object.
These functions will be generated dynamically for each Struct
object to make conversion as fast as possible. They are
generated the first time the methods are called.
"""
def __init__(self, *fields):
self.fields = fields
# Structures for to_binary, parse_value and parse_binary
self.static_codes = '='
self.static_values = 0
self.static_fields = []
self.static_size = None
self.var_fields = []
for f in self.fields:
# Append structcode if there is one and we haven't
# got any varsize fields yet.
if f.structcode is not None:
assert not self.var_fields
self.static_codes = self.static_codes + f.structcode
# Only store fields with values
if f.structvalues > 0:
self.static_fields.append(f)
self.static_values = self.static_values + f.structvalues
# If we have got one varsize field, all the rest must
# also be varsize fields.
else:
self.var_fields.append(f)
self.static_size = struct.calcsize(self.static_codes)
if self.var_fields:
self.structcode = None
self.structvalues = 0
else:
self.structcode = self.static_codes[1:]
self.structvalues = self.static_values
# These functions get called only once, as they will override
# themselves with dynamically created functions in the Struct
# object
def to_binary(self, *varargs, **keys):
"""data = s.to_binary(...)
Convert Python values into the binary representation. The
arguments will be all value fields with names, in the order
given when the Struct object was instantiated. With one
exception: fields with default arguments will be last.
Returns the binary representation as the string DATA.
"""
code = ''
total_length = str(self.static_size)
joins = []
args = []
defargs = []
kwarg = 0
# First pack all varfields so their lengths and formats are
# available when we pack their static LengthFields and
# FormatFields
i = 0
for f in self.var_fields:
if f.keyword_args:
kwarg = 1
kw = ', _keyword_args'
else:
kw = ''
# Call pack_value method for each field, storing
# the return values for later use
code = code + (' _%(name)s, _%(name)s_length, _%(name)s_format'
' = self.var_fields[%(fno)d].pack_value(%(name)s%(kw)s)\n'
% { 'name': f.name,
'fno': i,
'kw': kw })
total_length = total_length + ' + len(_%s)' % f.name
joins.append('_%s' % f.name)
i = i + 1
# Construct argument list for struct.pack call, packing all
# static fields. First argument is the structcode, the
# remaining are values.
pack_args = ['"%s"' % self.static_codes]
i = 0
for f in self.static_fields:
if isinstance(f, LengthField):
# If this is a total length field, insert
# the calculated field value here
if isinstance(f, TotalLengthField):
if self.var_fields:
pack_args.append('self.static_fields[%d].calc_length(%s)'
% (i, total_length))
else:
pack_args.append(str(f.calc_length(self.static_size)))
else:
pack_args.append('self.static_fields[%d].calc_length(_%s_length)'
% (i, f.name))
# Format field, just insert the value we got previously
elif isinstance(f, FormatField):
pack_args.append('_%s_format' % f.name)
# A constant field, insert its value directly
elif isinstance(f, ConstantField):
pack_args.append(str(f.value))
# Value fields
else:
if f.structvalues == 1:
# If there's a value check/convert function, call it
if f.check_value is not None:
pack_args.append('self.static_fields[%d].check_value(%s)'
% (i, f.name))
# Else just use the argument as provided
else:
pack_args.append(f.name)
# Multivalue field. Handled like single valuefield,
# but the values are tuple-unpacked into separate arguments
# which are appended to pack_args
else:
a = []
for j in range(f.structvalues):
a.append('_%s_%d' % (f.name, j))
if f.check_value is not None:
code = code + (' %s = self.static_fields[%d].check_value(%s)\n'
% (', '.join(a), i, f.name))
else:
code = code + ' %s = %s\n' % (', '.join(a), f.name)
pack_args = pack_args + a
# Add field to argument list
if f.name:
if f.default is None:
args.append(f.name)
else:
defargs.append('%s = %s' % (f.name, repr(f.default)))
i = i + 1
# Construct call to struct.pack
pack = 'struct.pack(%s)' % ', '.join(pack_args)
# If there are any varfields, we append the packed strings to build
# the resulting binary value
if self.var_fields:
code = code + ' return %s + %s\n' % (pack, ' + '.join(joins))
# If there's only static fields, return the packed value
else:
code = code + ' return %s\n' % pack
# Add all varsize fields to argument list. We do it here
# to ensure that they appear after the static fields.
for f in self.var_fields:
if f.name:
if f.default is None:
args.append(f.name)
else:
defargs.append('%s = %s' % (f.name, repr(f.default)))
args = args + defargs
if kwarg:
args.append('**_keyword_args')
# Add function header
code = 'def to_binary(self, %s):\n' % ', '.join(args) + code
# self._pack_code = code
# print
# print code
# print
# Finally, compile function by evaluating it. This will store
# the function in the local variable to_binary, thanks to the
# def: line. Convert it into an instance method bound to self,
# and store it in self.
# Unfortunately, this creates a circular reference. However,
# Structs are not really created dynamically so the potential
# memory leak isn't that serious. Besides, Python 2.0 has
# real garbage collection.
exec(code)
self.to_binary = types.MethodType(locals()["to_binary"], self)
# Finally call it manually
return self.to_binary(*varargs, **keys)
def pack_value(self, value):
""" This function allows Struct objects to be used in List and
Object fields. Each item represents the arguments to pass to
to_binary, either a tuple, a dictionary or a DictWrapper.
"""
if type(value) is tuple:
return self.to_binary(*value, **{})
elif type(value) is dict:
return self.to_binary(*(), **value)
elif isinstance(value, DictWrapper):
return self.to_binary(*(), **value._data)
else:
raise BadDataError('%s is not a tuple or a list' % (value))
def parse_value(self, val, display, rawdict = 0):
"""This function is used by List and Object fields to convert
Struct objects with no var_fields into Python values.
"""
code = ('def parse_value(self, val, display, rawdict = 0):\n'
' ret = {}\n')
vno = 0
fno = 0
for f in self.static_fields:
# Fields without names should be ignored, and there should
# not be any length or format fields if this function
# ever gets called. (If there were such fields, there should
# be a matching field in var_fields and then parse_binary
# would have been called instead.)
if not f.name:
pass
elif isinstance(f, LengthField):
pass
elif isinstance(f, FormatField):
pass
# Value fields
else:
# Get the index or range in val representing this field.
if f.structvalues == 1:
vrange = str(vno)
else:
vrange = '%d:%d' % (vno, vno + f.structvalues)
# If this field has a parse_value method, call it, otherwise
# use the unpacked value as is.
if f.parse_value is None:
code = code + ' ret["%s"] = val[%s]\n' % (f.name, vrange)
else:
code = code + (' ret["%s"] = self.static_fields[%d].'
'parse_value(val[%s], display)\n'
% (f.name, fno, vrange))
fno = fno + 1
vno = vno + f.structvalues
code = code + ' if not rawdict: return DictWrapper(ret)\n'
code = code + ' return ret\n'
# print
# print code
# print
# Finally, compile function as for to_binary.
exec(code)
self.parse_value = types.MethodType(locals()["parse_value"], self)
# Call it manually
return self.parse_value(val, display, rawdict)
def parse_binary(self, data, display, rawdict = 0):
"""values, remdata = s.parse_binary(data, display, rawdict = 0)
Convert a binary representation of the structure into Python values.
DATA is a string or a buffer containing the binary data.
DISPLAY should be a Xlib.protocol.display.Display object if
there are any Resource fields or Lists with ResourceObjs.
The Python values are returned as VALUES. If RAWDICT is true,
a Python dictionary is returned, where the keys are field
names and the values are the corresponding Python value. If
RAWDICT is false, a DictWrapper will be returned where all
fields are available as attributes.
REMDATA are the remaining binary data, unused by the Struct object.
"""
code = ('def parse_binary(self, data, display, rawdict = 0):\n'
' ret = {}\n'
' val = struct.unpack("%s", data[:%d])\n'
% (self.static_codes, self.static_size))
lengths = {}
formats = {}
vno = 0
fno = 0
for f in self.static_fields:
# Fields without a name should be ignored. This is typically
# pad and constant fields
if not f.name:
pass
# Store index in val for Length and Format fields, to be used
# when treating varfields.
elif isinstance(f, LengthField):
f_names = [f.name]
if f.other_fields:
f_names.extend(f.other_fields)
for f_name in f_names:
if f.parse_value is None:
lengths[f_name] = 'val[%d]' % vno
else:
lengths[f_name] = ('self.static_fields[%d].'
'parse_value(val[%d], display)'
% (fno, vno))
elif isinstance(f, FormatField):
formats[f.name] = 'val[%d]' % vno
# Treat value fields the same way as in parse_value.
else:
if f.structvalues == 1:
vrange = str(vno)
else:
vrange = '%d:%d' % (vno, vno + f.structvalues)
if f.parse_value is None:
code = code + ' ret["%s"] = val[%s]\n' % (f.name, vrange)
else:
code = code + (' ret["%s"] = self.static_fields[%d].'
'parse_value(val[%s], display)\n'
% (f.name, fno, vrange))
fno = fno + 1
vno = vno + f.structvalues
code = code + ' data = data[%d:]\n' % self.static_size
# Call parse_binary_value for each var_field, passing the
# length and format values from the unpacked val.
fno = 0
for f in self.var_fields:
code = code + (' ret["%s"], data = '
'self.var_fields[%d].parse_binary_value'
'(data, display, %s, %s)\n'
% (f.name, fno,
lengths.get(f.name, 'None'),
formats.get(f.name, 'None')))
fno = fno + 1
code = code + ' if not rawdict: ret = DictWrapper(ret)\n'
code = code + ' return ret, data\n'
# print
# print code
# print
# Finally, compile function as for to_binary.
exec(code)
self.parse_binary = types.MethodType(locals()["parse_binary"], self)
# Call it manually
return self.parse_binary(data, display, rawdict)
class TextElements8(ValueField):
string_textitem = Struct( LengthOf('string', 1),
Int8('delta'),
String8('string', pad = 0) )
def pack_value(self, value):
data = b''
args = {}
for v in value:
# Let values be simple strings, meaning a delta of 0
if type(v) is bytes:
v = (0, v)
# A tuple, it should be (delta, string)
# Encode it as one or more textitems
if type(v) in (tuple, dict) or \
isinstance(v, DictWrapper):
if type(v) is tuple:
delta, s = v
else:
delta = v['delta']
s = v['string']
while delta or s:
args['delta'] = delta
args['string'] = s[:254]
data = data + self.string_textitem.to_binary(*(), **args)
delta = 0
s = s[254:]
# Else an integer, i.e. a font change
else:
# Use fontable cast function if instance
try:
v = v.__fontable__()
except AttributeError as e: pass
data = data + struct.pack('>BL', 255, v)
# Pad out to four byte length
dlen = len(data)
return data + b'\0' * ((4 - dlen % 4) % 4), None, None
def parse_binary_value(self, data, display, length, format):
values = []
while 1:
if len(data) < 2:
break
# font change
assert type(data) is bytes
if data[0] == 255:
values.append(struct.unpack('>L', data[1:5])[0])
data = data[5:]
# skip null strings
elif data[0] == 0 and data[1] == 0:
data = data[2:]
# string with delta
else:
v, data = self.string_textitem.parse_binary(data, display)
values.append(v)
return values, b''
class TextElements16(TextElements8):
string_textitem = Struct( LengthOf('string', 1),
Int8('delta'),
String16('string', pad = 0) )
class GetAttrData(object):
def __getattr__(self, attr):
try:
if self._data:
return self._data[attr]
else:
raise AttributeError(attr)
except KeyError:
raise AttributeError(attr)
class DictWrapper(GetAttrData):
def __init__(self, dict):
self.__dict__['_data'] = dict
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
self._data[key] = value
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, value):
self._data[key] = value
def __delattr__(self, key):
del self._data[key]
def __str__(self):
return str(self._data)
def __repr__(self):
return '%s(%s)' % (self.__class__, repr(self._data))
def __eq__(self, other):
if isinstance(other, DictWrapper):
return self._data == other._data
else:
return self._data == other
def __ne__(self, other):
return not self.__eq__(other)
class Request:
def __init__(self, display, onerror = None, *args, **keys):
self._errorhandler = onerror
self._binary = self._request.to_binary(*args, **keys)
self._serial = None
display.send_request(self, onerror is not None)
def _set_error(self, error):
if self._errorhandler is not None:
return call_error_handler(self._errorhandler, error, self)
else:
return 0
class ReplyRequest(GetAttrData):
def __init__(self, display, defer = 0, *args, **keys):
self._display = display
self._binary = self._request.to_binary(*args, **keys)
self._serial = None
self._data = None
self._error = None
self._response_lock = lock.allocate_lock()
self._display.send_request(self, 1)
if not defer:
self.reply()
def reply(self):
# Send request and wait for reply if we haven't
# already got one. This means that reply() can safely
# be called more than one time.
self._response_lock.acquire()
while self._data is None and self._error is None:
self._display.send_recv_lock.acquire()
self._response_lock.release()
self._display.send_and_recv(request = self._serial)
self._response_lock.acquire()
self._response_lock.release()
self._display = None
# If error has been set, raise it
if self._error:
raise self._error
def _parse_response(self, data):
self._response_lock.acquire()
self._data, d = self._reply.parse_binary(data, self._display, rawdict = 1)
self._response_lock.release()
def _set_error(self, error):
self._response_lock.acquire()
self._error = error
self._response_lock.release()
return 1
def __repr__(self):
return '<%s serial = %s, data = %s, error = %s>' % (self.__class__, self._serial, self._data, self._error)
class Event(GetAttrData):
def __init__(self, binarydata = None, display = None,
**keys):
if binarydata:
self._binary = binarydata
self._data, data = self._fields.parse_binary(binarydata, display,
rawdict = 1)
# split event type into type and send_event bit
self._data['send_event'] = not not self._data['type'] & 0x80
self._data['type'] = self._data['type'] & 0x7f
else:
if self._code:
keys['type'] = self._code
keys['sequence_number'] = 0
self._binary = self._fields.to_binary(*(), **keys)
keys['send_event'] = 0
self._data = keys
def __repr__(self):
kwlist = []
for kw, val in self._data.items():
if kw == 'send_event':
continue
if kw == 'type' and self._data['send_event']:
val = val | 0x80
kwlist.append('%s = %s' % (kw, repr(val)))
kws = ', '.join(kwlist)
return '%s(%s)' % (self.__class__, kws)
def __eq__(self, other):
if isinstance(other, Event):
return self._data == other._data
else:
return self._data == other
def call_error_handler(handler, error, request):
try:
return handler(error, request)
except:
sys.stderr.write('Exception raised by error handler.\n')
traceback.print_exc()
return 0
|
gpl-2.0
| 5,240,419,798,686,419,000
| 28.929802
| 114
| 0.530938
| false
| 4.088571
| false
| false
| false
|
Eszti/pymachine
|
src/pymachine/definition_parser.py
|
1
|
20690
|
import logging
import sys
import re
import string
from collections import defaultdict
try:
import pyparsing
from pyparsing import Literal, Word, Group, Combine, Optional, Forward, alphanums, SkipTo, LineEnd, nums, delimitedList # nopep8
except ImportError:
logging.critical("PyParsing has to be installed on the computer")
sys.exit(-1)
from hunmisc.xstring.encoding import decode_from_proszeky
from constants import deep_cases, avm_pre, deep_pre, enc_pre, id_sep
from pymachine.machine import Machine
from pymachine.control import ConceptControl
class ParserException(Exception):
pass
class DefinitionParser(object):
_str = set([str, unicode])
lb = "["
rb = "]"
lp = "("
rp = ")"
left_defa = '<'
right_defa = '>'
clause_sep = ","
part_sep = ";"
prime = "'"
hyphen = "-"
langspec_pre = "$" # starts langspec deep case
unary_p = re.compile("^[a-z_#\-/0-9]+(/[0-9]+)?$")
binary_p = re.compile("^[A-Z_0-9]+(/[0-9]+)?$")
def __init__(self, plur_dict):
self.plur_dict = plur_dict
self.init_parser()
@classmethod
def _is_binary(cls, s):
return ((type(s) in cls._str and cls.binary_p.match(s)) or
(type(s) is list and s[0] == deep_pre and s[1] == "REL"))
@classmethod
def _is_unary(cls, s):
return ((type(s) in cls._str and cls.unary_p.match(s) is not None) or
(type(s) is list and (
(s[0] == deep_pre) or
(s[0] == cls.langspec_pre) or
(s[0] == enc_pre) or
(s[0] == cls.left_defa)
)))
@classmethod
def _is_deep_case(cls, s):
return s in deep_cases
def init_parser(self):
self.lb_lit = Literal(DefinitionParser.lb)
self.rb_lit = Literal(DefinitionParser.rb)
self.lp_lit = Literal(DefinitionParser.lp)
self.rp_lit = Literal(DefinitionParser.rp)
self.left_defa_lit = Literal(DefinitionParser.left_defa)
self.right_defa_lit = Literal(DefinitionParser.right_defa)
self.clause_sep_lit = Literal(DefinitionParser.clause_sep)
self.part_sep_lit = Literal(DefinitionParser.part_sep)
self.prime_lit = Literal(DefinitionParser.prime)
self.hyphen_lit = Literal(DefinitionParser.hyphen)
self.enc_pre_lit = Literal(enc_pre)
self.deep_pre_lit = Literal(deep_pre)
self.avm_pre_lit = Literal(avm_pre)
self.langspec_pre_lit = Literal(DefinitionParser.langspec_pre)
self.id_sep_lit = Literal(id_sep)
self.disambig_id = self.id_sep_lit + Word(nums)
self.deep_cases = Group(self.deep_pre_lit + Word(string.uppercase))
self.unary = Forward()
self.unary << (Combine(Optional("-") +
Word(string.lowercase + "_" + nums) +
Optional(self.disambig_id))
| self.deep_cases
| Group(self.langspec_pre_lit +
Word(string.uppercase + "_"))
| Group(self.avm_pre_lit +
Word(string.ascii_letters + "_"))
| Group(self.enc_pre_lit + Word(alphanums + "_-"))
| Group(self.left_defa_lit + self.unary +
self.right_defa_lit))
self.binary = (Combine(Word(string.uppercase + "_" + nums) +
Optional(self.disambig_id))
| Group(self.deep_pre_lit + 'REL'))
self.dontcare = SkipTo(LineEnd())
# main expression
self.expression = Forward()
self.binexpr = Forward()
self.unexpr = Forward()
self.argexpr = Forward()
# "enumerable expression"
# D -> E | E, D
self.definition = Group(delimitedList(self.expression,
delim=DefinitionParser.clause_sep))
self.expression << Group(
# E -> UE
(self.unexpr) ^
# E -> BE
(self.binexpr) ^
# E -> U ( E )
(self.unary + self.lp_lit + self.expression + self.rp_lit) ^
# E -> < E >
(self.left_defa_lit + self.expression + self.right_defa_lit)
)
self.binexpr << Group(
# BE -> A B
(self.argexpr + self.binary) ^
# BE -> B A
(self.binary + self.argexpr) ^
# BE -> A B A
(self.argexpr + self.binary + self.argexpr) ^
# BE -> B [ E; E ]
(self.binary + self.lb_lit + self.expression + self.part_sep_lit
+ self.expression + self.rb_lit)
)
self.unexpr << Group(
# UE -> U
(self.unary) ^
# UE -> U [ D ]
(self.unary + self.lb_lit + self.definition + self.rb_lit) ^
# UE -> U ( U )
(self.unary + self.lp_lit + self.unary + self.rp_lit)
)
self.argexpr << Group(
# A -> UE
(self.unexpr) ^
# A -> [ D ]
(self.lb_lit + self.definition + self.rb_lit) ^
# A -> < A >
(self.left_defa_lit + self.argexpr + self.right_defa_lit) ^
# A -> '
(self.prime_lit)
)
self.hu, self.pos, self.en, self.lt, self.pt = (
Word(alphanums + "#-/_.'"),) * 5
self.defid = Word(nums)
self.word = Group(self.hu + self.pos + self.en + self.lt + self.pt)
# S -> W : D | W : D % _
#self.sen = self.definition + LineEnd()
def parse(self, s):
return self.definition.parseString(s, parseAll=True).asList()
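# A hypothetical sketch (not from the original source) of the grammar above
# applied to a small 4lang-style definition string:
#
# dp = DefinitionParser({})
# dp.parse("dog[animal, HAS tail]")
# # one expression of the form U [ D ]: the unary 'dog' restricted by a
# # definition with two clauses, the unary 'animal' and the binary 'HAS tail'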
def create_machine(self, name, partitions):
# lists are accepted because of ["=", "AGT"]
if type(name) is list:
name = "".join(name)
# HACK until we find a good solution for defaults
name = name.strip('<>')
is_plur = name in self.plur_dict
if is_plur:
name = self.plur_dict[name]
m = Machine(decode_from_proszeky(name),
ConceptControl(), partitions)
if is_plur:
m.append(self.create_machine('more', 1), 0)
return m
def unify(self, machine):
def __collect_machines(m, machines, is_root=False):
# cut the recursion
key = m.printname(), __has_other(m)
if (key in machines and m in machines[key]):
return
if not is_root:
machines[m.printname(), __has_other(m)].append(m)
for partition in m.partitions:
for m_ in partition:
__collect_machines(m_, machines)
def __has_other(m):
for m_ in m.partitions[0]:
if m_.printname() == "other":
return True
return False
def __get_unified(machines, res=None):
# if nothing to unify, don't
if len(machines) == 1:
return machines[0]
# if a return machine is given, don't create a new one
if res is None:
prototype = machines[0]
res = self.create_machine(prototype.printname(),
len(prototype.partitions))
for m in machines:
# if the same machine, don't add anything
if id(m) == id(res):
continue
for p_i, p in enumerate(m.partitions):
for part_m in p:
if part_m.printname() != "other":
res.partitions[p_i].append(part_m)
part_m.del_parent_link(m, p_i)
part_m.add_parent_link(res, p_i)
return res
def __replace(where, for_what, is_other=False, visited=None):
if visited is None:
visited = set()
if id(where) in visited:
return
visited.add(id(where))
pn = for_what.printname()
for p_i, p in enumerate(where.partitions):
# change the partition machines
for part_m_i, part_m in enumerate(p):
if part_m.printname() == pn and __has_other(
part_m) == is_other:
where.partitions[p_i][part_m_i] = for_what
for_what.add_parent_link(where, p_i)
__replace(where.partitions[p_i][part_m_i],
for_what, is_other, visited)
# unification if there is a machine more than once on the same
# partition
where.partitions[p_i] = list(set(p))
machines = defaultdict(list)
__collect_machines(machine, machines, is_root=True)
for k, machines_to_unify in machines.iteritems():
if len(machines_to_unify[0].partitions) > 1:
continue
printname, is_other = k
#if unification affects the root (machine),
#be that the result machine
if printname == machine.printname():
unified = __get_unified(machines_to_unify, machine)
else:
unified = __get_unified(machines_to_unify)
__replace(machine, unified, is_other)
def __parse_expr(self, expr, root, loop_to_defendum=True,
three_parts=False):
"""
creates machines from a parse node and its children
there should be one handler for every rule
"""
logging.debug("Parsing expression: {0}".format(expr))
# name shortening for classmethods
cls = DefinitionParser
is_binary = cls._is_binary
is_unary = cls._is_unary
is_tree = lambda r: type(r) == list
left_part = 0 + int(three_parts)
right_part = 1 + int(three_parts)
most_part = 2 + int(three_parts)
if (len(expr) == 1):
# UE -> U
if (is_unary(expr[0])):
logging.debug("Parsing {0} as a unary.".format(expr[0]))
return [self.create_machine(expr[0], 1)]
# E -> UE | BE, A -> UE
if (is_tree(expr[0])):
logging.debug("Parsing {0} as a tree.".format(expr[0]))
return self.__parse_expr(expr[0], root, loop_to_defendum,
three_parts)
if (len(expr) == 2):
# BE -> A B
if (is_tree(expr[0]) and
is_binary(expr[1])):
m = self.create_machine(expr[1], most_part)
if expr[0] != ["'"]:
m.append_all(
self.__parse_expr(expr[0], root, loop_to_defendum,
three_parts),
left_part)
if loop_to_defendum:
m.append(root, right_part)
return [m]
# BE -> B A
if (is_binary(expr[0]) and
is_tree(expr[1])):
m = self.create_machine(expr[0], most_part)
if expr[1] != ["'"]:
m.append_all(
self.__parse_expr(expr[1], root, loop_to_defendum,
three_parts),
right_part)
if loop_to_defendum:
m.append(root, left_part)
return [m]
# BE -> 'B
if (expr[0] == ["'"] and
is_binary(expr[1])):
m = self.create_machine(expr[1], most_part)
#m.append(parent, 1)
if loop_to_defendum:
m.append(root, right_part)
return [m]
# BE -> B'
if (is_binary(expr[0]) and
expr[1] == ["'"]):
m = self.create_machine(expr[0], most_part)
# m.append(parent, 0)
if loop_to_defendum:
m.append(root, left_part)
return [m]
# U -> =AGT
if expr[0] == deep_pre:
return [self.create_machine(deep_pre + expr[1], 1)]
# U -> $HUN_FROM
if (expr[0] == cls.langspec_pre):
return [self.create_machine(cls.langspec_pre + expr[1], 1)]
# U -> #AVM
if (expr[0] == avm_pre):
return [self.create_machine(avm_pre + expr[1], 1)]
# U -> @External_url
if (expr[0] == enc_pre):
return [self.create_machine(enc_pre + expr[1], 1)]
if (len(expr) == 3):
# UB -> A B A
if (is_tree(expr[0]) and
is_binary(expr[1]) and
is_tree(expr[2])):
m = self.create_machine(expr[1], most_part)
logging.debug(expr[1])
if expr[0] != [DefinitionParser.prime]:
logging.debug(expr[0])
m.append_all(
self.__parse_expr(expr[0], root, loop_to_defendum,
three_parts),
left_part)
if expr[2] != [DefinitionParser.prime]:
m.append_all(
self.__parse_expr(expr[2], root, loop_to_defendum,
three_parts),
right_part)
return [m]
# A -> [ D ]
if (expr[0] == "[" and
is_tree(expr[1]) and
expr[2] == "]"):
logging.debug(
"Parsing expr {0} as an embedded definition".format(expr))
res = list(
self.__parse_definition(expr[1], root, loop_to_defendum,
three_parts))
return res
# E -> < E >, U -> < U >
if expr[0] == '<' and expr[2] == '>':
logging.debug('E -> < E >' + str(expr[1]))
return list(self.__parse_expr(expr[1], root, loop_to_defendum,
three_parts))
if (len(expr) == 4):
# UE -> U ( U )
# E -> U ( BE ) provisional
if (is_unary(expr[0]) and
expr[1] == "(" and
expr[3] == ")"):
logging.debug('X -> U ( Y )')
if is_unary(expr[2]):
m = self.create_machine(expr[2], 1)
else:
m = self.__parse_expr(expr[2], root, loop_to_defendum,
three_parts)[0]
if not three_parts:
logging.warning(
"for 0th partition of binary machines, " +
"set three_parts=True, "+str(expr))
m.append(self.create_machine(expr[0], 1), 0)
return [m]
# UE -> U [ D ]
if (is_unary(expr[0]) and
expr[1] == "[" and
is_tree(expr[2]) and
expr[3] == "]"):
m = self.create_machine(expr[0], 1)
for parsed_expr in self.__parse_definition(expr[2], root,
loop_to_defendum,
three_parts):
m.append(parsed_expr, 0)
return [m]
# E -> U ( BE )
#if (is_unary(expr[0]) and
# expr[1] == "(" and
# is_tree(expr[2]) and
# expr[3] == ")"):
# ms = self.__parse_expr(expr[2], root, loop_to_defendum,
# three_parts)
# # if BE was an expression with an apostrophe, then
# # return of __parse_expr() is None
# if len(ms) != 0:
# ms[0].append(self.create_machine(expr[0], 1), 0)
# # if len(ms) == 3 and ms[0] == '<':
# # ms = ms[1]
# if len(ms) != 1:
# logging.warning("0th partition of binary machines " +
# "is not implemented "+str(ms))
# return ms
logging.warning('machine cannot be built '+str(expr))
if (len(expr) == 6):
# BE -> B [E; E]
if (is_binary(expr[0]) and
expr[1] == "[" and
is_tree(expr[2]) and
expr[3] == ";" and
is_tree(expr[4]) and
expr[5] == "]"):
m = self.create_machine(expr[0], 2)
m.append_all(
self.__parse_expr(expr[2], root, loop_to_defendum,
three_parts),
0)
m.append_all(
self.__parse_expr(expr[4], root, loop_to_defendum,
three_parts),
1)
return [m]
pe = ParserException(
"Unknown expression in definition: {0} (len={1})".format(
expr,
len(expr)))
logging.debug(str(pe))
logging.debug(expr)
raise pe
def __parse_definition(self, definition, root, loop_to_defendum=True,
three_parts=False):
logging.debug(str(definition))
for d in definition:
yield self.__parse_expr(d, root, loop_to_defendum, three_parts)[0]
def parse_into_machines(self, string, printname_index=0, add_indices=False,
loop_to_defendum=True, three_parts=False):
printname = string.split('\t')[printname_index]
try:
id_, urob, pos, def_, comment = string.split('\t')[4:]
except:
raise Exception(string.split('\t'))
machine = self.create_machine(printname.lower(), 1)
#TODO =AGT -> partition 1, =PAT -> partition 2, =TO -> ?
if add_indices:
machine.printname_ = machine.printname() + id_sep + id_
if def_ != '':
logging.debug(def_)
parsed = self.parse(def_)
logging.debug(parsed)
for parsed_expr in self.__parse_definition(
parsed[0], machine, loop_to_defendum, three_parts):
machine.append(parsed_expr, 0)
self.unify(machine)
return machine
def read(f, plur_filn, printname_index=0, add_indices=False,
loop_to_defendum=True, three_parts=False):
logging.warning(
"Will now discard all but the first definition of each \
headword!".upper())
d = defaultdict(set)
plur_dict = read_plur(open(plur_filn)) if plur_filn else {}
dp = DefinitionParser(plur_dict)
for line in f:
l = line.strip('\n')
logging.debug("Parsing: {0}".format(l))
try:
m = dp.parse_into_machines(l, printname_index, add_indices,
loop_to_defendum, three_parts)
if m.partitions[0] == []:
logging.debug('dropping empty definition of '+m.printname())
continue
pn = m.printname()
if pn in d:
continue
# logging.warning('duplicate pn: {0}, machines: {1}, {2}'.format(
# pn, d[pn], "{0}:{1}".format(m, m.partitions)))
d[m.printname()].add(m)
logging.debug('\n'+m.to_debug_str())
except pyparsing.ParseException, pe:
print l
logging.error("Error: "+str(pe))
return d
def read_plur(_file):
plur_dict = {}
for line in _file:
plur, sg = line.split()
plur_dict[plur] = sg
return plur_dict
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING,
format="%(asctime)s : %(module)s (%(lineno)s) " +
"- %(levelname)s - %(message)s")
plur_dict = read_plur(open('/home/recski/projects/4lang/4lang.plural'))
dp = DefinitionParser(plur_dict)
pstr = sys.argv[-1]
if sys.argv[1] == "-d":
print Machine.to_debug_str(dp.parse_into_machines(pstr), max_depth=99)
elif sys.argv[1] == "-f":
lexicon = read(file(sys.argv[2]), '../../res/4lang/4lang.plural',
three_parts=True)
else:
print dp.parse(pstr)
|
mit
| -156,672,265,516,880,830
| 35.298246
| 133
| 0.460222
| false
| 3.894955
| false
| false
| false
|
COMBINE-lab/matryoshka_work
|
coredomains-import/python-src/domain_size_all_chroms.py
|
1
|
5607
|
# get min, max, avg domain sizes across different resolutions
import sys
import matplotlib.pyplot as plt
import matplotlib
import collections
Domain = collections.namedtuple("Domain", ['start', 'end'])
################################################
#
#
#
################################################
def parseBingRenDomains(path):
print "Parsing bing ren"
B = {}
with open(path, 'r') as f:
for line in f:
chromo, start, stop = line.strip().split()
if not (chromo in B): B[chromo] = []
B[chromo].append( Domain( int(start) / step, int(stop) /step ) )
avg = {}
total_sum = 0
total_len = 0
for chromo in sorted(B.keys()):
total_sum += sum ( [d.end - d.start for d in B[chromo] ] )
total_len += len(B[chromo])
a = sum ( [d.end - d.start for d in B[chromo] ] ) * 1.0 / len(B[chromo])
print "Avg B.R. domain length,", chromo, "\t", a, "\t", a * step
avg[chromo] = a
print "Compare", sum(avg.values() ) / len(avg.values() ) * step, total_sum * 1.0 / total_len * step
return B
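# Input format assumed by parseBingRenDomains above (inferred from the
# three-way split, not taken from the original data files): one
# whitespace-separated domain per line, e.g.
#
# chr1 770000 1250000
# chr1 1250000 1850000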
################################################
#
#
#
################################################
def parseMultiResDomains(paths, step):
domains_chro = {}
print "Parsing our domains"
for f_name in paths:
parts = f_name[:-3].split(".")
chromo = parts[1]
if not (chromo in domains_chro ):
domains_chro[chromo] = {}
a = parts[-1]
#assert(chro == chromo)
if len(a) == 1:
a = int(a) * 0.1
elif len(a)==2:
a = int(a) * 0.01
elif 'alpha' in a:
a = 1.0
domains_chro[chromo][a] = []
with open(f_name, 'r') as f_in:
for line in f_in:
parts = line.strip().split()
if len(parts) == 2:
start = parts[0]
end = parts[1]
else:
start = parts[1]
end = parts[2]
domains_chro[chromo][a].append( Domain(int(start) / step, int(end) / step) )
#sorted_items = sorted([(k,v) for k,v in domains_res.iteritems()], key=lambda x: x[0] )
#print [y[0] for y in sorted_items[:10] ]
#print map(len, [y[1] for y in sorted_items] )
return domains_chro
################################################
#
#
#
################################################
def plotSizes(BR_cnt, BR_avg, Gamma, Avg, Min, Max, Cnt):
plt.subplot(211)
for chromo in Avg.keys():
plt.plot(Gamma, Avg[chromo], 'b-', Gamma, Max[chromo], 'r-', Gamma, Min[chromo], 'g-', alpha=0.3)
plt.plot( [min(Gamma), max(Gamma)], [BR_avg[chromo], BR_avg[chromo] ], 'm-', alpha=0.3)
plt.xlabel('$\gamma$, resolution parameter')
plt.ylabel('avg domain size, in 40Kb')
plt.yscale('log')
# plt.grid(True)
plt.subplot(212)
for chromo in Cnt.keys():
plt.plot(Gamma, Cnt[chromo], 'b-', alpha=0.3)
plt.plot( [min(Gamma), max(Gamma)], [BR_cnt[chromo], BR_cnt[chromo] ], 'm-', alpha=0.3)
plt.ylabel('number of domains')
plt.xlabel('all chromosomes')
plt.yscale('log')
f_name = "domain_sizes_all.pdf"
plt.savefig(f_name)
#plt.show()
print "Saved to", f_name
################################################
#
#
#
################################################
def plotSizesAvgOverChromos(BR_cnt, BR_avg, Gamma, Avg, Min, Max, Cnt):
plt.subplot(211)
font = {'family' : 'normal',
'size' : 20}
matplotlib.rc('font', **font)
avg_avg = []
max_avg = []
min_avg = []
BR_avg_avg = []
cnt_avg = []
num_chromo = len(Avg)
for i in xrange(len(Gamma)):
g = Gamma[i]
avg_avg.append( sum ( [res[i] for chromo, res in Avg.iteritems() ] ) / num_chromo )
max_avg.append( sum ( [res[i] for chromo, res in Max.iteritems() ] ) / num_chromo )
min_avg.append( sum ( [res[i] for chromo, res in Min.iteritems() ] ) / num_chromo )
cnt_avg.append( sum ( [res[i] for chromo, res in Cnt.iteritems() ] ) / num_chromo )
print avg_avg
print max_avg
print min_avg
plt.plot(Gamma, avg_avg, 'b-', Gamma, max_avg, 'r-', Gamma, min_avg, 'g-')
# plt.plot(Gamma, avg_avg, 'b-', alpha=0.7)
BR_avg_avg = sum( [data for chromo, data in BR_avg.iteritems()] ) / len(BR_avg)
plt.plot( [min(Gamma), max(Gamma)], [BR_avg_avg, BR_avg_avg ], 'm-', alpha=0.7)
# plt.xlabel('$\gamma$, resolution parameter')
plt.ylabel('size, in 40Kb')
plt.yscale('log')
# plt.grid(True)
plt.subplot(212)
plt.plot(Gamma, cnt_avg, 'b-')
BR_cnt_avg = sum( [data for chromo, data in BR_cnt.iteritems()] ) / len(BR_cnt)
plt.plot( [min(Gamma), max(Gamma)], [BR_cnt_avg, BR_cnt_avg ], 'm-')
plt.ylabel('domain count')
# plt.xlabel('all chromosomes')
plt.xlabel('$\gamma$, resolution parameter')
plt.yscale('log')
f_name = "domain_sizes_all.pdf"
plt.savefig(f_name)
#plt.show()
print "Saved to", f_name
#
# Main
#
binren = sys.argv[1]
dp_domains = sys.argv[2:]
step = 40000
b_domains = parseBingRenDomains(binren)
multi_dom = parseMultiResDomains(dp_domains, step)
br_cnt = {chromo: len(domains) for (chromo, domains) in b_domains.iteritems() }
br_avg_s = {chromo: sum( [ (d.end+1 - d.start) for d in b_domains[chromo]] ) * 1.0 / br_cnt[chromo] for chromo in br_cnt.keys() }
# calculate sizes
# average per chromosme, per resolution
Gamma = []
Avg = {}
Min = {}
Max = {}
Cnt = {}
#sort by gamma
doneOnce = False
for chrom, resolutions in multi_dom.iteritems():
Avg[chrom] = []
Min[chrom] = []
Max[chrom] = []
Cnt[chrom] = []
resolutions = sorted( resolutions.items(), key=lambda x: x[0] )
for g, domains in resolutions:
if not doneOnce:
Gamma.append(g)
lens = [d.end+1 - d.start for d in domains]
Avg[chrom].append( sum(lens) * 1.0 / len (domains) )
Min[chrom].append( min(lens) )
Max[chrom].append( max(lens) )
Cnt[chrom].append( len(domains) )
doneOnce = True
#print Min
# Plot
plotSizesAvgOverChromos(br_cnt, br_avg_s, Gamma, Avg, Min, Max, Cnt)
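# Illustrative invocation sketch (file names are hypothetical; the first
# argument is the Bing Ren domain list, the rest are per-chromosome,
# per-resolution domain files in the naming scheme parseMultiResDomains expects):
#   python domain_size_all_chroms.py bingren_domains.txt domains.chr1.55.gz domains.chr1.75.gz ...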
|
gpl-3.0
| 1,464,951,984,376,433,000
| 27.035
| 129
| 0.579098
| false
| 2.636107
| false
| false
| false
|
kynikos/lib.py.wxclasses
|
src/wxclasses/timectrls.py
|
1
|
15455
|
# wxClasses
# Copyright (C) 2013-2014 Dario Giovannetti <dev@dariogiovannetti.net>
#
# This file is part of wxClasses.
#
# wxClasses is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wxClasses is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with wxClasses. If not, see <http://www.gnu.org/licenses/>.
import time as time_
import datetime as datetime_
import wx
from choices import MultipleChoiceCtrl
from misc import NarrowSpinCtrl
class HourCtrl(object):
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.hourctrl = NarrowSpinCtrl(self.panel, min=0, max=23,
style=wx.SP_ARROW_KEYS | wx.SP_WRAP)
box.Add(self.hourctrl, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
slabel = wx.StaticText(self.panel, label=':')
box.Add(slabel, flag=wx.ALIGN_CENTER_VERTICAL)
self.minutectrl = NarrowSpinCtrl(self.panel, min=0, max=59,
style=wx.SP_ARROW_KEYS | wx.SP_WRAP)
box.Add(self.minutectrl, flag=wx.ALIGN_CENTER_VERTICAL)
def set_values(self, hour, minute):
self.hourctrl.SetValue(hour)
self.minutectrl.SetValue(minute)
def get_main_panel(self):
return self.panel
def get_hour(self):
return self.hourctrl.GetValue()
def get_minute(self):
return self.minutectrl.GetValue()
def get_relative_time(self):
hour = self.hourctrl.GetValue()
minute = self.minutectrl.GetValue()
return hour * 3600 + minute * 60
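    # Worked example (illustrative, not part of the original source): with the
    # spinners set to 14:30, get_relative_time() returns
    # 14*3600 + 30*60 = 52200 seconds since midnight.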
class WeekDayCtrl(object):
choices = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
'Saturday', 'Sunday')
def __init__(self, parent):
self.panel = wx.Panel(parent)
self.dayctrl = wx.Choice(self.panel, choices=self.choices)
def set_day(self, day):
self.dayctrl.SetSelection(self.dayctrl.FindString(day))
def get_main_panel(self):
return self.panel
def get_day(self):
return self.dayctrl.GetString(self.dayctrl.GetSelection())
def get_relative_unix_time(self):
        # Unix time started on a Thursday (1970-01-01)
return {
'Thursday': 0,
'Friday': 86400,
'Saturday': 172800,
'Sunday': 259200,
'Monday': 345600,
'Tuesday': 432000,
'Wednesday': 518400,
}[self.get_day()]
@classmethod
def compute_widget_day(cls, timew):
# Any check that 0 <= number <= 6 should be done outside of here
return cls.choices[timew]
@classmethod
def compute_day_label(cls, day):
return cls.choices.index(day)
class MonthDayCtrl(object):
choices = ('1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th',
'10th', '11th', '12th', '13th', '14th', '15th', '16th', '17th',
'18th', '19th', '20th', '21st', '22nd', '23rd', '24th', '25th',
'26th', '27th', '28th', '29th', '30th', '31st')
def __init__(self, parent):
self.panel = wx.Panel(parent)
self.dayctrl = wx.Choice(self.panel, choices=self.choices)
def set_day(self, day):
self.dayctrl.SetSelection(day - 1)
def get_main_panel(self):
return self.panel
def get_day(self):
return self.dayctrl.GetSelection() + 1
def get_relative_time(self):
return self.get_day() * 86400 - 86400
@classmethod
def compute_day_label(cls, day):
return cls.choices[day - 1]
class MonthInverseDayCtrl(MonthDayCtrl):
choices = ['last', ] + [d + ' to last' for d in ('2nd', '3rd', '4th',
'5th', '6th', '7th', '8th', '9th', '10th', '11th', '12th',
'13th', '14th', '15th', '16th', '17th', '18th', '19th', '20th',
'21st', '22nd', '23rd', '24th', '25th', '26th', '27th', '28th',
'29th', '30th', '31st')]
def get_day(self):
return self.dayctrl.GetSelection() + 1
@classmethod
def compute_day_label(cls, day):
return cls.choices[day - 1].replace(' ', '-')
class MonthWeekdayNumberCtrl(MonthDayCtrl):
choices = ('1st', '2nd', '3rd', '4th', '5th')
class MonthInverseWeekdayNumberCtrl(MonthInverseDayCtrl):
choices = ['last', ] + [d + ' to last' for d in ('2nd', '3rd', '4th',
'5th')]
class MonthWeekdayCtrl(object):
mwnctrl = MonthWeekdayNumberCtrl
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.numberctrl = self.mwnctrl(self.panel)
box.Add(self.numberctrl.get_main_panel(),
flag=wx.ALIGN_CENTER_VERTICAL)
self.dayctrl = WeekDayCtrl(self.panel)
box.Add(self.dayctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL |
wx.ALIGN_RIGHT | wx.LEFT, border=12)
def set_values(self, number, day):
self.numberctrl.set_day(number)
self.dayctrl.set_day(day)
def get_main_panel(self):
return self.panel
def get_weekday_number(self):
return self.numberctrl.get_day()
def get_weekday(self):
return self.dayctrl.get_day()
@classmethod
def compute_weekday_number_label(cls, number):
return cls.mwnctrl.compute_day_label(number)
@staticmethod
def compute_weekday_label(day):
return WeekDayCtrl.compute_day_label(day)
@staticmethod
def compute_widget_weekday(day):
return WeekDayCtrl.compute_widget_day(day)
class MonthInverseWeekdayCtrl(MonthWeekdayCtrl):
mwnctrl = MonthInverseWeekdayNumberCtrl
class DateHourCtrl(object):
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
# DatePickerCtrl doesn't release TAB (Outspline bug #332)
self.datectrl = wx.DatePickerCtrl(self.panel)
box.Add(self.datectrl, flag=wx.ALIGN_CENTER_VERTICAL)
self.hourctrl = HourCtrl(self.panel)
box.Add(self.hourctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL |
wx.ALIGN_RIGHT | wx.LEFT, border=12)
def set_values(self, year, month, day, hour, minute):
sdate = wx.DateTime()
sdate.Set(year=year, month=month, day=day)
self.datectrl.SetValue(sdate)
self.hourctrl.set_values(hour, minute)
def get_main_panel(self):
return self.panel
def get_unix_time(self):
date = self.datectrl.GetValue()
hour = self.hourctrl.get_hour()
minute = self.hourctrl.get_minute()
fdate = datetime_.datetime(date.GetYear(), date.GetMonth() + 1,
date.GetDay(), hour, minute)
# Don't do this because it behaves incorrectly if the date is a day
# in which the DST starts or ends
#date = self.datectrl.GetValue().GetTicks()
#return date + hour * 3600 + minute * 60
return int(time_.mktime(fdate.timetuple()))
def get_year(self):
return self.datectrl.GetValue().GetYear()
def get_month(self):
return self.datectrl.GetValue().GetMonth()
def get_day(self):
return self.datectrl.GetValue().GetDay()
def get_hour(self):
return self.hourctrl.get_hour()
def get_minute(self):
return self.hourctrl.get_minute()
@staticmethod
def compute_month_label(month):
# Hardcode the names since only English is supported for the moment
# anyway
return ('January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December')[
month - 1]
class WeekDayHourCtrl(object):
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.dayctrl = WeekDayCtrl(self.panel)
box.Add(self.dayctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL)
self.hourctrl = HourCtrl(self.panel)
box.Add(self.hourctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL |
wx.ALIGN_RIGHT | wx.LEFT, border=12)
def set_values(self, day, hour, minute):
self.dayctrl.set_day(day)
self.hourctrl.set_values(hour, minute)
def get_main_panel(self):
return self.panel
def get_day(self):
return self.dayctrl.get_day()
def get_hour(self):
return self.hourctrl.get_hour()
def get_minute(self):
return self.hourctrl.get_minute()
def get_relative_time(self):
return self.hourctrl.get_relative_time()
def get_relative_unix_week_time(self):
rday = self.dayctrl.get_relative_unix_time()
rhour = self.hourctrl.get_relative_time()
return rday + rhour
@staticmethod
def compute_widget_day(timew):
return WeekDayCtrl.compute_widget_day(timew)
class MonthDayHourCtrl(object):
    # Defining mdctrl here makes it easier to derive other classes from this one
mdctrl = MonthDayCtrl
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.dayctrl = self.mdctrl(self.panel)
box.Add(self.dayctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL)
self.hourctrl = HourCtrl(self.panel)
box.Add(self.hourctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL |
wx.ALIGN_RIGHT | wx.LEFT, border=12)
def set_values(self, day, hour, minute):
self.dayctrl.set_day(day)
self.hourctrl.set_values(hour, minute)
def get_main_panel(self):
return self.panel
def get_day(self):
return self.dayctrl.get_day()
def get_hour(self):
return self.hourctrl.get_hour()
def get_minute(self):
return self.hourctrl.get_minute()
def get_relative_month_time(self):
rday = self.dayctrl.get_relative_time()
rhour = self.hourctrl.get_relative_time()
return rday + rhour
def get_relative_time(self):
return self.hourctrl.get_relative_time()
@classmethod
def compute_day_label(cls, day):
return cls.mdctrl.compute_day_label(day)
class MonthInverseDayHourCtrl(MonthDayHourCtrl):
mdctrl = MonthInverseDayCtrl
def get_relative_month_time(self):
rday = self.dayctrl.get_relative_time()
rhour = self.hourctrl.get_relative_time()
return rday + 86400 - rhour
class MonthWeekdayHourCtrl(MonthDayHourCtrl):
mdctrl = MonthWeekdayCtrl
def set_values(self, number, weekday, hour, minute):
self.dayctrl.set_values(number, weekday)
self.hourctrl.set_values(hour, minute)
def get_relative_time(self):
return self.hourctrl.get_relative_time()
def get_weekday_number(self):
return self.dayctrl.get_weekday_number()
def get_weekday(self):
return self.dayctrl.get_weekday()
@classmethod
def compute_weekday_number_label(cls, number):
return cls.mdctrl.compute_weekday_number_label(number)
@classmethod
def compute_weekday_label(cls, day):
return cls.mdctrl.compute_weekday_label(day)
@classmethod
def compute_widget_weekday(cls, day):
return cls.mdctrl.compute_widget_weekday(day)
class MonthInverseWeekdayHourCtrl(MonthWeekdayHourCtrl):
mdctrl = MonthInverseWeekdayCtrl
class TimeSpanCtrl(object):
def __init__(self, parent, min_number, max_number):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.numberctrl = NarrowSpinCtrl(self.panel, min=min_number,
max=max_number, style=wx.SP_ARROW_KEYS)
box.Add(self.numberctrl, flag=wx.ALIGN_CENTER_VERTICAL)
self.unitctrl = wx.Choice(self.panel,
choices=('minutes', 'hours', 'days', 'weeks'))
box.Add(self.unitctrl, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT |
wx.LEFT, border=12)
def set_values(self, number, unit):
self.numberctrl.SetValue(number)
self.unitctrl.SetSelection(self.unitctrl.FindString(unit))
def get_main_panel(self):
return self.panel
def get_time_span(self):
number = self.numberctrl.GetValue()
unit = self.unitctrl.GetString(self.unitctrl.GetSelection())
return self._compute_relative_time(number, unit)
def get_number(self):
return self.numberctrl.GetValue()
def get_unit(self):
return self.unitctrl.GetString(self.unitctrl.GetSelection())
@staticmethod
def _compute_relative_time(number, unit):
mult = {'minutes': 60,
'hours': 3600,
'days': 86400,
'weeks': 604800}
return number * mult[unit]
@staticmethod
def compute_widget_values(diff):
if diff == 0:
return (0, 'minutes')
else:
adiff = abs(diff)
# Same result as `1 if diff > 0 else -1`
neg = diff // adiff
for (number, unit) in ((604800, 'weeks'),
(86400, 'days'),
(3600, 'hours'),
(60, 'minutes')):
if adiff % number == 0:
return (adiff // number * neg, unit)
else:
return (adiff // 60 * neg, 'minutes')
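    # Worked examples (illustrative, not part of the original source):
    #   compute_widget_values(7200)  -> (2, 'hours')     7200 s is an exact hour multiple
    #   compute_widget_values(-90)   -> (-1, 'minutes')  90 s is not, so it falls through
    #                                                    to the floor-division fallback
    #   compute_widget_values(0)     -> (0, 'minutes')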
class WeekdaysCtrl(MultipleChoiceCtrl):
# Hardcode the names since only English is supported for the moment anyway
dnames = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
def __init__(self, parent):
MultipleChoiceCtrl.__init__(self, parent, self.dnames)
def set_days(self, days):
return self.set_values(days)
def get_days(self):
return self.get_values()
@classmethod
def compute_day_name(cls, day):
return cls.dnames[day - 1]
class MonthsCtrl(MultipleChoiceCtrl):
# Hardcode the names since only English is supported for the moment anyway
mnames = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')
def __init__(self, parent):
MultipleChoiceCtrl.__init__(self, parent, self.mnames)
def set_months(self, months):
return self.set_values(months)
def get_months(self):
return self.get_values()
@classmethod
def compute_month_name(cls, month):
return cls.mnames[month - 1]
|
gpl-3.0
| 2,253,997,558,680,086,000
| 30.285425
| 79
| 0.5945
| false
| 3.621134
| false
| false
| false
|
schutte/submit
|
lib/submit/deliverers/sendmail.py
|
1
|
2776
|
# -*- coding: utf-8 -*-
#
# This file is part of submit, a sendmail replacement or supplement for
# multi-user desktop systems.
#
# Copyright © 2008 Michael Schutte <michi@uiae.at>
#
# submit is available under the terms of the MIT/X license. Please see the
# file COPYING for details.
from submit.deliverers import *
from submit.errors import *
from submit.i18n import *
import os
import shlex
import subprocess
__all__ = ["SendmailDeliverer"]
class SendmailDeliverer(AbstractDeliverer):
"""A deliverer submitting messages using a sendmail-compatible program."""
def needs_authentication(self):
"""Sendmail-based delivery methods never ask for authentication."""
return False
def authenticate(self, auth):
"""No authentication needed; do nothing."""
def abort(self):
"""Abort after a failed authentication procedure. Authentication will
never fail; still, do nothing."""
def deliver(self, message, rcpts):
"""Pipe the message through sendmail."""
program = self.config.get_method(str, self.method, "program",
default_sendmail)
if not program:
raise DeliveryFailedError(n_(
"Unable to find sendmail program."))
sendmail = shlex.split(program)
args = self.config.get_method(str, self.method, "arguments",
"-oem -oi")
if args: args = shlex.split(args)
else: args = []
cmd = sendmail + args + ["-f", message.efrom] + rcpts
proc = subprocess.Popen(cmd,
stdin = subprocess.PIPE, stderr = subprocess.PIPE)
proc.stdin.write(message.get_body())
proc.stdin.close()
if proc.wait() != 0:
details = proc.stderr.read().strip()
if details:
raise DeliveryFailedError(n_('"%(program)s" failed: %(details)s.'),
program=program, details=details)
else:
raise DeliveryFailedError(n_('"%(program)s" failed with unknown error.'),
program=program)
Deliverer = SendmailDeliverer
def default_sendmail():
"""Determine the path to the MTA sendmail implementation. Take into
account that `submit` itself might be called `sendmail`; in this case,
`sendmail.notsubmit` is what we are looking for."""
dirs = ("/usr/sbin", "/usr/lib")
files = ("sendmail.notsubmit", "sendmail")
for dir in dirs:
for filename in files:
filename = os.path.realpath(os.path.join(dir, filename))
if os.path.basename(filename) == "submit":
continue # avoid loops
elif os.access(filename, os.X_OK):
return filename
return None
# vim:tw=78:fo-=t:sw=4:sts=4:et:
|
mit
| 6,753,451,764,058,531,000
| 34.126582
| 89
| 0.615495
| false
| 3.998559
| false
| false
| false
|
kba/ocropy
|
ocrolib/common.py
|
1
|
37334
|
# -*- coding: utf-8 -*-
################################################################
### common functions for data structures, file name manipulation, etc.
################################################################
from __future__ import print_function
import os
import os.path
import re
import sys
import sysconfig
import unicodedata
import warnings
import inspect
import glob
import cPickle
import numpy
from numpy import *
import pylab
from pylab import imshow
from scipy.ndimage import morphology,measurements
import PIL
from default import getlocal
from toplevel import *
import chars
import codecs
import ligatures
import lstm
import morph
import multiprocessing
################################################################
### exceptions
################################################################
class OcropusException(Exception):
trace = 1
def __init__(self,*args,**kw):
Exception.__init__(self,*args,**kw)
class Unimplemented(OcropusException):
trace = 1
"Exception raised when a feature is unimplemented."
def __init__(self,s):
Exception.__init__(self,inspect.stack()[1][3])
class Internal(OcropusException):
trace = 1
"Exception raised when a feature is unimplemented."
def __init__(self,s):
Exception.__init__(self,inspect.stack()[1][3])
class RecognitionError(OcropusException):
trace = 1
"Some kind of error during recognition."
def __init__(self,explanation,**kw):
self.context = kw
s = [explanation]
s += ["%s=%s"%(k,summary(kw[k])) for k in kw]
message = " ".join(s)
Exception.__init__(self,message)
class Warning(OcropusException):
trace = 0
def __init__(self,*args,**kw):
OcropusException.__init__(self,*args,**kw)
class BadClassLabel(OcropusException):
trace = 0
"Exception for bad class labels in a dataset or input."
def __init__(self,s):
Exception.__init__(self,s)
class BadImage(OcropusException):
trace = 0
def __init__(self,*args,**kw):
OcropusException.__init__(self,*args)
class BadInput(OcropusException):
trace = 0
def __init__(self,*args,**kw):
OcropusException.__init__(self,*args,**kw)
class FileNotFound(OcropusException):
trace = 0
"""Some file-not-found error during OCRopus processing."""
def __init__(self,fname):
self.fname = fname
def __str__(self):
return "file not found %s"%(self.fname,)
pickle_mode = 2
def deprecated(f):
    # Simple console-printing variant; a warnings-based version further below
    # redefines this name.
    warned = [0]
    def _wrapper(*args,**kw):
        if not warned[0]:
            print(f, "has been DEPRECATED")
            warned[0] = 1
        return f(*args,**kw)
    return _wrapper
################################################################
# text normalization
################################################################
def normalize_text(s):
"""Apply standard Unicode normalizations for OCR.
This eliminates common ambiguities and weird unicode
characters."""
s = unicode(s)
s = unicodedata.normalize('NFC',s)
s = re.sub(ur'\s+(?u)',' ',s)
s = re.sub(ur'\n(?u)','',s)
s = re.sub(ur'^\s+(?u)','',s)
s = re.sub(ur'\s+$(?u)','',s)
for m,r in chars.replacements:
s = re.sub(unicode(m),unicode(r),s)
return s
def project_text(s,kind="exact"):
"""Project text onto a smaller subset of characters
for comparison."""
s = normalize_text(s)
s = re.sub(ur'( *[.] *){4,}',u'....',s) # dot rows
    s = re.sub(ur'[~_]',u'',s) # strip underscores and tildes
if kind=="exact":
return s
if kind=="nospace":
return re.sub(ur'\s','',s)
if kind=="spletdig":
return re.sub(ur'[^A-Za-z0-9 ]','',s)
if kind=="letdig":
return re.sub(ur'[^A-Za-z0-9]','',s)
if kind=="letters":
return re.sub(ur'[^A-Za-z]','',s)
if kind=="digits":
return re.sub(ur'[^0-9]','',s)
if kind=="lnc":
s = s.upper()
return re.sub(ur'[^A-Z]','',s)
raise BadInput("unknown normalization: "+kind)
################################################################
### Text I/O
################################################################
def read_text(fname,nonl=1,normalize=1):
"""Read text. This assumes files are in unicode.
By default, it removes newlines and normalizes the
text for OCR processing with `normalize_text`"""
with codecs.open(fname,"r","utf-8") as stream:
result = stream.read()
if nonl and len(result)>0 and result[-1]=='\n':
result = result[:-1]
if normalize:
result = normalize_text(result)
return result
def write_text(fname,text,nonl=0,normalize=1):
"""Write text. This assumes files are in unicode.
By default, it removes newlines and normalizes the
text for OCR processing with `normalize_text`"""
if normalize:
text = normalize_text(text)
with codecs.open(fname,"w","utf-8") as stream:
stream.write(text)
if not nonl and text[-1]!='\n':
stream.write('\n')
################################################################
### Image I/O
################################################################
def pil2array(im,alpha=0):
if im.mode=="L":
a = numpy.fromstring(im.tobytes(),'B')
a.shape = im.size[1],im.size[0]
return a
if im.mode=="RGB":
a = numpy.fromstring(im.tobytes(),'B')
a.shape = im.size[1],im.size[0],3
return a
if im.mode=="RGBA":
a = numpy.fromstring(im.tobytes(),'B')
a.shape = im.size[1],im.size[0],4
if not alpha: a = a[:,:,:3]
return a
return pil2array(im.convert("L"))
def array2pil(a):
if a.dtype==dtype("B"):
if a.ndim==2:
return PIL.Image.frombytes("L",(a.shape[1],a.shape[0]),a.tostring())
elif a.ndim==3:
return PIL.Image.frombytes("RGB",(a.shape[1],a.shape[0]),a.tostring())
else:
raise OcropusException("bad image rank")
elif a.dtype==dtype('float32'):
        return PIL.Image.frombytes("F",(a.shape[1],a.shape[0]),a.tostring())
else:
raise OcropusException("unknown image type")
def isbytearray(a):
return a.dtype in [dtype('uint8')]
def isfloatarray(a):
return a.dtype in [dtype('f'),dtype('float32'),dtype('float64')]
def isintarray(a):
return a.dtype in [dtype('B'),dtype('int16'),dtype('int32'),dtype('int64'),dtype('uint16'),dtype('uint32'),dtype('uint64')]
def isintegerarray(a):
return a.dtype in [dtype('int32'),dtype('int64'),dtype('uint32'),dtype('uint64')]
@checks(str,pageno=int,_=GRAYSCALE)
def read_image_gray(fname,pageno=0):
"""Read an image and returns it as a floating point array.
The optional page number allows images from files containing multiple
images to be addressed. Byte and short arrays are rescaled to
the range 0...1 (unsigned) or -1...1 (signed)."""
if type(fname)==tuple: fname,pageno = fname
assert pageno==0
pil = PIL.Image.open(fname)
a = pil2array(pil)
if a.dtype==dtype('uint8'):
a = a/255.0
if a.dtype==dtype('int8'):
a = a/127.0
elif a.dtype==dtype('uint16'):
a = a/65536.0
elif a.dtype==dtype('int16'):
a = a/32767.0
elif isfloatarray(a):
pass
else:
raise OcropusException("unknown image type: "+a.dtype)
if a.ndim==3:
a = mean(a,2)
return a
def write_image_gray(fname,image,normalize=0,verbose=0):
"""Write an image to disk. If the image is of floating point
type, its values are clipped to the range [0,1],
multiplied by 255 and converted to unsigned bytes. Otherwise,
the image must be of type unsigned byte."""
if verbose: print("# writing", fname)
if isfloatarray(image):
image = array(255*clip(image,0.0,1.0),'B')
assert image.dtype==dtype('B'),"array has wrong dtype: %s"%image.dtype
im = array2pil(image)
im.save(fname)
@checks(str,_=ABINARY2)
def read_image_binary(fname,dtype='i',pageno=0):
"""Read an image from disk and return it as a binary image
of the given dtype."""
if type(fname)==tuple: fname,pageno = fname
assert pageno==0
pil = PIL.Image.open(fname)
a = pil2array(pil)
if a.ndim==3: a = amax(a,axis=2)
return array(a>0.5*(amin(a)+amax(a)),dtype)
@checks(str,ABINARY2)
def write_image_binary(fname,image,verbose=0):
"""Write a binary image to disk. This verifies first that the given image
is, in fact, binary. The image may be of any type, but must consist of only
two values."""
if verbose: print("# writing", fname)
assert image.ndim==2
image = array(255*(image>midrange(image)),'B')
im = array2pil(image)
im.save(fname)
@checks(AINT3,_=AINT2)
def rgb2int(a):
"""Converts a rank 3 array with RGB values stored in the
last axis into a rank 2 array containing 32 bit RGB values."""
assert a.ndim==3
assert a.dtype==dtype('B')
return array(0xffffff&((0x10000*a[:,:,0])|(0x100*a[:,:,1])|a[:,:,2]),'i')
@checks(AINT2,_=AINT3)
def int2rgb(image):
"""Converts a rank 3 array with RGB values stored in the
last axis into a rank 2 array containing 32 bit RGB values."""
assert image.ndim==2
assert isintarray(image)
a = zeros(list(image.shape)+[3],'B')
a[:,:,0] = (image>>16)
a[:,:,1] = (image>>8)
a[:,:,2] = image
return a
@checks(LIGHTSEG,_=DARKSEG)
def make_seg_black(image):
assert isintegerarray(image),"%s: wrong type for segmentation"%image.dtype
image = image.copy()
image[image==0xffffff] = 0
return image
@checks(DARKSEG,_=LIGHTSEG)
def make_seg_white(image):
assert isintegerarray(image),"%s: wrong type for segmentation"%image.dtype
image = image.copy()
image[image==0] = 0xffffff
return image
@checks(str,_=LINESEG)
def read_line_segmentation(fname):
"""Reads a line segmentation, that is an RGB image whose values
encode the segmentation of a text line. Returns an int array."""
pil = PIL.Image.open(fname)
a = pil2array(pil)
assert a.dtype==dtype('B')
assert a.ndim==3
image = rgb2int(a)
result = make_seg_black(image)
return result
@checks(str,LINESEG)
def write_line_segmentation(fname,image):
"""Writes a line segmentation, that is an RGB image whose values
encode the segmentation of a text line."""
a = int2rgb(make_seg_white(image))
im = array2pil(a)
im.save(fname)
@checks(str,_=PAGESEG)
def read_page_segmentation(fname):
"""Reads a page segmentation, that is an RGB image whose values
encode the segmentation of a page. Returns an int array."""
pil = PIL.Image.open(fname)
a = pil2array(pil)
assert a.dtype==dtype('B')
assert a.ndim==3
segmentation = rgb2int(a)
segmentation = make_seg_black(segmentation)
return segmentation
@checks(str,PAGESEG)
def write_page_segmentation(fname,image):
"""Writes a page segmentation, that is an RGB image whose values
encode the segmentation of a page."""
assert image.ndim==2
assert image.dtype in [dtype('int32'),dtype('int64')]
a = int2rgb(make_seg_white(image))
im = array2pil(a)
im.save(fname)
def iulib_page_iterator(files):
for fname in files:
image = read_image_gray(fname)
yield image,fname
def norm_max(a):
return a/amax(a)
def pad_by(image,r,dtype=None):
"""Symmetrically pad the image by the given amount.
FIXME: replace by scipy version."""
if dtype is None: dtype = image.dtype
w,h = image.shape
    result = zeros((w+2*r,h+2*r),dtype)
result[r:(w+r),r:(h+r)] = image
return result
class RegionExtractor:
"""A class facilitating iterating over the parts of a segmentation."""
def __init__(self):
self.cache = {}
def clear(self):
del self.cache
self.cache = {}
def setImage(self,image):
return self.setImageMasked(image)
def setImageMasked(self,image,mask=None,lo=None,hi=None):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This picks a subset of the segmentation to iterate
        over, using a mask and lo and hi values."""
assert image.dtype==dtype('B') or image.dtype==dtype('i'),"image must be type B or i"
if image.ndim==3: image = rgb2int(image)
assert image.ndim==2,"wrong number of dimensions"
self.image = image
labels = image
if lo is not None: labels[labels<lo] = 0
if hi is not None: labels[labels>hi] = 0
if mask is not None: labels = bitwise_and(labels,mask)
labels,correspondence = morph.renumber_labels_ordered(labels,correspondence=1)
self.labels = labels
self.correspondence = correspondence
self.objects = [None]+morph.find_objects(labels)
def setPageColumns(self,image):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This iterates over the columns."""
self.setImageMasked(image,0xff0000,hi=0x800000)
def setPageParagraphs(self,image):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This iterates over the paragraphs (if present
in the segmentation)."""
self.setImageMasked(image,0xffff00,hi=0x800000)
def setPageLines(self,image):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This iterates over the lines."""
self.setImageMasked(image,0xffffff,hi=0x800000)
def id(self,i):
"""Return the RGB pixel value for this segment."""
return self.correspondence[i]
def x0(self,i):
"""Return x0 (column) for the start of the box."""
return self.bbox(i)[1]
def x1(self,i):
"""Return x0 (column) for the end of the box."""
return self.bbox(i)[3]
def y0(self,i):
"""Return y0 (row) for the start of the box."""
h = self.image.shape[0]
return h-self.bbox(i)[2]-1
def y1(self,i):
"""Return y0 (row) for the end of the box."""
h = self.image.shape[0]
return h-self.bbox(i)[0]-1
def bbox(self,i):
"""Return the bounding box in raster coordinates
(row0,col0,row1,col1)."""
r = self.objects[i]
# print("@@@bbox", i, r)
return (r[0].start,r[1].start,r[0].stop,r[1].stop)
def bboxMath(self,i):
"""Return the bounding box in math coordinates
(row0,col0,row1,col1)."""
h = self.image.shape[0]
(y0,x0,y1,x1) = self.bbox(i)
return (h-y1-1,x0,h-y0-1,x1)
def length(self):
"""Return the number of components."""
return len(self.objects)
def mask(self,index,margin=0):
"""Return the mask for component index."""
b = self.objects[index]
# print("@@@mask", index, b)
m = self.labels[b]
m[m!=index] = 0
if margin>0: m = pad_by(m,margin)
return array(m!=0,'B')
def extract(self,image,index,margin=0):
"""Return the subimage for component index."""
h,w = image.shape[:2]
(r0,c0,r1,c1) = self.bbox(index)
# mask = self.mask(index,margin=margin)
return image[max(0,r0-margin):min(h,r1+margin),max(0,c0-margin):min(w,c1+margin),...]
def extractMasked(self,image,index,grow=0,bg=None,margin=0,dtype=None):
"""Return the masked subimage for component index, elsewhere the bg value."""
if bg is None: bg = amax(image)
h,w = image.shape[:2]
mask = self.mask(index,margin=margin)
# FIXME ... not circular
if grow>0: mask = morphology.binary_dilation(mask,iterations=grow)
mh,mw = mask.shape
box = self.bbox(index)
r0,c0,r1,c1 = box
subimage = improc.cut(image,(r0,c0,r0+mh-2*margin,c0+mw-2*margin),margin,bg=bg)
return where(mask,subimage,bg)
################################################################
### Object reading and writing
### This handles reading and writing zipped files directly,
### and it also contains workarounds for changed module/class names.
################################################################
def save_object(fname,obj,zip=0):
if zip==0 and fname.endswith(".gz"):
zip = 1
if zip>0:
# with gzip.GzipFile(fname,"wb") as stream:
with os.popen("gzip -9 > '%s'"%fname,"wb") as stream:
cPickle.dump(obj,stream,2)
else:
with open(fname,"wb") as stream:
cPickle.dump(obj,stream,2)
def unpickle_find_global(mname,cname):
if mname=="lstm.lstm":
return getattr(lstm,cname)
if not mname in sys.modules.keys():
exec "import "+mname
return getattr(sys.modules[mname],cname)
def load_object(fname,zip=0,nofind=0,verbose=0):
"""Loads an object from disk. By default, this handles zipped files
and searches in the usual places for OCRopus. It also handles some
class names that have changed."""
if not nofind:
fname = ocropus_find_file(fname)
if verbose:
print("# loading object", fname)
if zip==0 and fname.endswith(".gz"):
zip = 1
if zip>0:
# with gzip.GzipFile(fname,"rb") as stream:
with os.popen("gunzip < '%s'"%fname,"rb") as stream:
unpickler = cPickle.Unpickler(stream)
unpickler.find_global = unpickle_find_global
return unpickler.load()
else:
with open(fname,"rb") as stream:
unpickler = cPickle.Unpickler(stream)
unpickler.find_global = unpickle_find_global
return unpickler.load()
################################################################
### Simple record object.
################################################################
class Record:
"""A simple record datatype that allows initialization with
keyword arguments, as in Record(x=3,y=9)"""
def __init__(self,**kw):
self.__dict__.update(kw)
def like(self,obj):
self.__dict__.update(obj.__dict__)
return self
################################################################
### Histograms
################################################################
def chist(l):
"""Simple counting histogram. Takes a list of items
and returns a list of (count,object) tuples."""
counts = {}
for c in l:
counts[c] = counts.get(c,0)+1
hist = [(v,k) for k,v in counts.items()]
return sorted(hist,reverse=1)
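# Illustrative example (added): chist(["a","b","a","c","a"]) returns
# [(3, 'a'), (1, 'c'), (1, 'b')] -- counts in descending order, ties broken
# by reverse-sorting the item itself.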
################################################################
### multiprocessing
################################################################
def number_of_processors():
"""Estimates the number of processors."""
return multiprocessing.cpu_count()
# return int(os.popen("cat /proc/cpuinfo | grep 'processor.*:' | wc -l").read())
def parallel_map(fun,jobs,parallel=0,chunksize=1):
if parallel<2:
for e in jobs:
result = fun(e)
yield result
else:
try:
pool = multiprocessing.Pool(parallel)
for e in pool.imap_unordered(fun,jobs,chunksize):
yield e
finally:
pool.close()
pool.join()
del pool
def check_valid_class_label(s):
"""Determines whether the given character is a valid class label.
Control characters and spaces are not permitted."""
if type(s)==unicode:
if re.search(r'[\0-\x20]',s):
raise BadClassLabel(s)
elif type(s)==str:
if re.search(r'[^\x21-\x7e]',s):
raise BadClassLabel(s)
else:
raise BadClassLabel(s)
def summary(x):
"""Summarize a datatype as a string (for display and debugging)."""
if type(x)==numpy.ndarray:
return "<ndarray %s %s>"%(x.shape,x.dtype)
if type(x)==str and len(x)>10:
return '"%s..."'%x
if type(x)==list and len(x)>10:
return '%s...'%x
return str(x)
################################################################
### file name manipulation
################################################################
@checks(str,_=str)
def findfile(name,error=1):
result = ocropus_find_file(name)
return result
@checks(str)
def finddir(name):
"""Find some OCRopus-related resource by looking in a bunch off standard places.
(This needs to be integrated better with setup.py and the build system.)"""
local = getlocal()
path = name
if os.path.exists(path) and os.path.isdir(path): return path
path = local+name
if os.path.exists(path) and os.path.isdir(path): return path
_,tail = os.path.split(name)
path = tail
if os.path.exists(path) and os.path.isdir(path): return path
path = local+tail
if os.path.exists(path) and os.path.isdir(path): return path
raise FileNotFound("file '"+path+"' not found in . or /usr/local/share/ocropus/")
@checks(str)
def allsplitext(path):
"""Split all the pathname extensions, so that "a/b.c.d" -> "a/b", ".c.d" """
match = re.search(r'((.*/)*[^.]*)([^/]*)',path)
if not match:
return path,""
else:
return match.group(1),match.group(3)
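# Illustrative examples (added, not part of the original source):
#   allsplitext("a/b.c.d") -> ("a/b", ".c.d")
#   allsplitext("a/b")     -> ("a/b", "")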
@checks(str)
def base(path):
return allsplitext(path)[0]
@checks(str,{str,unicode})
def write_text_simple(file,s):
"""Write the given string s to the output file."""
with open(file,"w") as stream:
if type(s)==unicode: s = s.encode("utf-8")
stream.write(s)
@checks([str])
def glob_all(args):
"""Given a list of command line arguments, expand all of them with glob."""
result = []
for arg in args:
if arg[0]=="@":
with open(arg[1:],"r") as stream:
expanded = stream.read().split("\n")
expanded = [s for s in expanded if s!=""]
else:
expanded = sorted(glob.glob(arg))
if len(expanded)<1:
raise FileNotFound("%s: expansion did not yield any files"%arg)
result += expanded
return result
@checks([str])
def expand_args(args):
"""Given a list of command line arguments, if the
length is one, assume it's a book directory and expands it.
Otherwise returns the arguments unchanged."""
if len(args)==1 and os.path.isdir(args[0]):
return sorted(glob.glob(args[0]+"/????/??????.png"))
else:
return args
def ocropus_find_file(fname, gz=True):
"""Search for `fname` in one of the OCRopus data directories, as well as
    the current directory. If `gz` is True, search also for gzipped files.
Result of searching $fname is the first existing in:
* $base/$fname
* $base/$fname.gz # if gz
* $base/model/$fname
* $base/model/$fname.gz # if gz
* $base/data/$fname
* $base/data/$fname.gz # if gz
* $base/gui/$fname
* $base/gui/$fname.gz # if gz
    $base can be any of the following base paths:
* `$OCROPUS_DATA` environment variable
* current working directory
* ../../../../share/ocropus from this file's install location
* `/usr/local/share/ocropus`
* `$PREFIX/share/ocropus` ($PREFIX being the Python installation
prefix, usually `/usr`)
"""
possible_prefixes = []
if os.getenv("OCROPUS_DATA"):
possible_prefixes.append(os.getenv("OCROPUS_DATA"))
possible_prefixes.append(os.curdir)
possible_prefixes.append(os.path.normpath(os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())),
os.pardir, os.pardir, os.pardir, os.pardir, "share", "ocropus")))
possible_prefixes.append("/usr/local/share/ocropus")
possible_prefixes.append(os.path.join(
sysconfig.get_config_var("datarootdir"), "ocropus"))
# Unique entries with preserved order in possible_prefixes
# http://stackoverflow.com/a/15637398/201318
possible_prefixes = [possible_prefixes[i] for i in
sorted(numpy.unique(possible_prefixes, return_index=True)[1])]
for prefix in possible_prefixes:
if not os.path.isdir(prefix):
continue
for basename in [".", "models", "data", "gui"]:
if not os.path.isdir(os.path.join(prefix, basename)):
continue
full = os.path.join(prefix, basename, fname)
if os.path.exists(full):
return full
if gz and os.path.exists(full + ".gz"):
return full + ".gz"
raise FileNotFound(fname)
def fvariant(fname,kind,gt=""):
"""Find the file variant corresponding to the given file name.
    Possible file variants are line (or png), rseg, cseg, fst, costs, and txt.
Ground truth files have an extra suffix (usually something like "gt",
as in 010001.gt.txt or 010001.rseg.gt.png). By default, the variant
with the same ground truth suffix is produced. The non-ground-truth
version can be produced with gt="", the ground truth version can
be produced with gt="gt" (or some other desired suffix)."""
if gt!="": gt = "."+gt
base,ext = allsplitext(fname)
# text output
if kind=="txt":
return base+gt+".txt"
assert gt=="","gt suffix may only be supplied for .txt files (%s,%s,%s)"%(fname,kind,gt)
# a text line image
if kind=="line" or kind=="png" or kind=="bin":
return base+".bin.png"
if kind=="nrm":
return base+".nrm.png"
# a recognition lattice
if kind=="lattice":
return base+gt+".lattice"
# raw segmentation
if kind=="rseg":
return base+".rseg.png"
# character segmentation
if kind=="cseg":
return base+".cseg.png"
# text specifically aligned with cseg (this may be different from gt or txt)
if kind=="aligned":
return base+".aligned"
# per character costs
if kind=="costs":
return base+".costs"
raise BadInput("unknown kind: %s"%kind)
################################################################
### Utility for setting "parameters" on an object: a list of keywords for
### changing instance variables.
################################################################
def set_params(object,kw,warn=1):
"""Given an object and a dictionary of keyword arguments,
set only those object properties that are already instance
    variables of the given object. Returns a new dictionary
    without the key/value pairs that have been used; if
    all keywords have been used, the returned dictionary is empty."""
kw = kw.copy()
for k,v in kw.items():
if hasattr(object,k):
setattr(object,k,v)
del kw[k]
return kw
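# Usage sketch (illustrative; the class and values are made up):
#   class Params(object):
#       def __init__(self):
#           self.alpha = 1
#   p = Params()
#   rest = set_params(p, dict(alpha=2, beta=3))
#   # p.alpha is now 2; rest == {'beta': 3} since Params instances have no 'beta'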
################################################################
### warning and logging
################################################################
def caller():
"""Just returns info about the caller in string for (for error messages)."""
frame = sys._getframe(2)
info = inspect.getframeinfo(frame)
result = "%s:%d (%s)"%(info.filename,info.lineno,info.function)
del frame
return result
def die(message,*args):
"""Die with an error message."""
message = message%args
message = caller()+" FATAL "+message+"\n"
sys.stderr.write(message)
sys.exit(1)
def warn(message,*args):
"""Give a warning message."""
message = message%args
message = caller()+" WARNING "+message+"\n"
sys.stderr.write(message)
already_warned = {}
def warn_once(message,*args):
"""Give a warning message, but just once."""
c = caller()
if c in already_warned: return
already_warned[c] = 1
message = message%args
message = c+" WARNING "+message+"\n"
sys.stderr.write(message)
def quick_check_page_components(page_bin,dpi):
"""Quickly check whether the components of page_bin are
reasonable. Returns a value between 0 and 1; <0.5 means that
there is probably something wrong."""
return 1.0
def quick_check_line_components(line_bin,dpi):
"""Quickly check whether the components of line_bin are
reasonable. Returns a value between 0 and 1; <0.5 means that
there is probably something wrong."""
return 1.0
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning,stacklevel=2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
################################################################
### conversion functions
################################################################
def ustrg2unicode(u,lig=ligatures.lig):
"""Convert an iulib ustrg to a Python unicode string; the
C++ version iulib.ustrg2unicode does weird things for special
symbols like -3"""
result = ""
for i in range(u.length()):
value = u.at(i)
if value>=0:
c = lig.chr(value)
if c is not None:
result += c
else:
result += "<%d>"%value
return result
### code for instantiation native components
def pyconstruct(s):
"""Constructs a Python object from a constructor, an expression
of the form x.y.z.name(args). This ensures that x.y.z is imported.
In the future, more forms of syntax may be accepted."""
env = {}
if "(" not in s:
s += "()"
path = s[:s.find("(")]
if "." in path:
module = path[:path.rfind(".")]
print("import", module)
exec "import "+module in env
return eval(s,env)
def mkpython(name):
"""Tries to instantiate a Python class. Gives an error if it looks
like a Python class but can't be instantiated. Returns None if it
doesn't look like a Python class."""
if name is None or len(name)==0:
return None
elif type(name) is not str:
return name()
elif name[0]=="=":
return pyconstruct(name[1:])
elif "(" in name or "." in name:
return pyconstruct(name)
else:
return None
################################################################
### loading and saving components
################################################################
# This code has to deal with a lot of special cases for all the
# different formats we have accrued.
def obinfo(ob):
"""A bit of information about the given object. Returns
the str representation of the object, and if it has a shape,
also includes the shape."""
result = str(ob)
if hasattr(ob,"shape"):
result += " "
result += str(ob.shape)
return result
def save_component(file,object,verbose=0,verify=0):
"""Save an object to disk in an appropriate format. If the object
is a wrapper for a native component (=inherits from
CommonComponent and has a comp attribute, or is in package
ocropus), write it using ocropus.save_component in native format.
Otherwise, write it using Python's pickle. We could use pickle
for everything (since the native components pickle), but that
would be slower and more confusing."""
if hasattr(object,"save_component"):
object.save_component(file)
return
if object.__class__.__name__=="CommonComponent" and hasattr(object,"comp"):
# FIXME -- get rid of this eventually
import ocropus
ocropus.save_component(file,object.comp)
return
if type(object).__module__=="ocropus":
import ocropus
ocropus.save_component(file,object)
return
if verbose:
print("[save_component]")
if verbose:
for k,v in object.__dict__.items():
print(":", k, obinfo(v))
with open(file,"wb") as stream:
        cPickle.dump(object,stream,pickle_mode)
if verify:
if verbose:
print("[trying to read it again]")
with open(file,"rb") as stream:
            cPickle.load(stream)
def load_component(file):
"""Load a component. This handles various special cases,
including old-style C++ recognizers (soon to be gotten rid of),
python expressions ("=package.ObjectName(arg1,arg2)"),
and simple pickled Python objects (default)."""
if file[0]=="=":
return pyconstruct(file[1:])
elif file[0]=="@":
file = file[1:]
with open(file,"r") as stream:
# FIXME -- get rid of this eventually
start = stream.read(128)
if start.startswith("<object>\nlinerec\n"):
# FIXME -- get rid of this eventually
warnings.warn("loading old-style linerec: %s"%file)
result = RecognizeLine()
import ocropus
result.comp = ocropus.load_IRecognizeLine(file)
return result
if start.startswith("<object>"):
# FIXME -- get rid of this eventually
warnings.warn("loading old-style cmodel: %s"%file)
import ocroold
result = ocroold.Model()
import ocropus
result.comp = ocropus.load_IModel(file)
return result
return load_object(file)
def binarize_range(image,dtype='B',threshold=0.5):
"""Binarize an image by its range."""
threshold = (amax(image)+amin(image))*threshold
scale = 1
if dtype=='B': scale = 255
return array(scale*(image>threshold),dtype=dtype)
def draw_pseg(pseg,axis=None):
if axis is None:
axis = subplot(111)
h = pseg.dim(1)
regions = ocropy.RegionExtractor()
regions.setPageLines(pseg)
for i in range(1,regions.length()):
x0,y0,x1,y1 = (regions.x0(i),regions.y0(i),regions.x1(i),regions.y1(i))
p = patches.Rectangle((x0,h-y1-1),x1-x0,y1-y0,edgecolor="red",fill=0)
axis.add_patch(p)
def draw_aligned(result,axis=None):
raise Unimplemented("FIXME draw_aligned")
if axis is None:
axis = subplot(111)
axis.imshow(NI(result.image),cmap=cm.gray)
cseg = result.cseg
if type(cseg)==numpy.ndarray: cseg = common.lseg2narray(cseg)
ocropy.make_line_segmentation_black(cseg)
ocropy.renumber_labels(cseg,1)
bboxes = ocropy.rectarray()
ocropy.bounding_boxes(bboxes,cseg)
s = re.sub(r'\s+','',result.output)
h = cseg.dim(1)
for i in range(1,bboxes.length()):
r = bboxes.at(i)
x0,y0,x1,y1 = (r.x0,r.y0,r.x1,r.y1)
p = patches.Rectangle((x0,h-y1-1),x1-x0,y1-y0,edgecolor=(0.0,0.0,1.0,0.5),fill=0)
axis.add_patch(p)
if i>0 and i-1<len(s):
axis.text(x0,h-y0-1,s[i-1],color="red",weight="bold",fontsize=14)
draw()
def plotgrid(data,d=10,shape=(30,30)):
"""Plot a list of images on a grid."""
ion()
gray()
clf()
for i in range(min(d*d,len(data))):
subplot(d,d,i+1)
row = data[i]
if shape is not None: row = row.reshape(shape)
imshow(row)
ginput(1,timeout=0.1)
def showrgb(r,g=None,b=None):
if g is None: g = r
if b is None: b = r
imshow(array([r,g,b]).transpose([1,2,0]))
def showgrid(l,cols=None,n=400,titles=None,xlabels=None,ylabels=None,**kw):
if "cmap" not in kw: kw["cmap"] = pylab.cm.gray
if "interpolation" not in kw: kw["interpolation"] = "nearest"
n = minimum(n,len(l))
if cols is None: cols = int(sqrt(n))
rows = (n+cols-1)//cols
for i in range(n):
        pylab.subplot(rows,cols,i+1)
        pylab.xticks([]); pylab.yticks([])
pylab.imshow(l[i],**kw)
if titles is not None: pylab.title(str(titles[i]))
if xlabels is not None: pylab.xlabel(str(xlabels[i]))
if ylabels is not None: pylab.ylabel(str(ylabels[i]))
def gt_explode(s):
l = re.split(r'_(.{1,4})_',s)
result = []
for i,e in enumerate(l):
if i%2==0:
result += [c for c in e]
else:
result += [e]
result = [re.sub("\001","_",s) for s in result]
result = [re.sub("\002","\\\\",s) for s in result]
return result
def gt_implode(l):
result = []
for c in l:
if c=="_":
result.append("___")
elif len(c)<=1:
result.append(c)
elif len(c)<=4:
result.append("_"+c+"_")
else:
raise BadInput("cannot create ground truth transcription for: %s"%l)
return "".join(result)
@checks(int,sequence=int,frac=int,_=BOOL)
def testset(index,sequence=0,frac=10):
# this doesn't have to be good, just a fast, somewhat random function
return sequence==int(abs(sin(index))*1.23456789e6)%frac
def midrange(image,frac=0.5):
"""Computes the center of the range of image values
(for quick thresholding)."""
return frac*(amin(image)+amax(image))
def remove_noise(line,minsize=8):
"""Remove small pixels from an image."""
if minsize==0: return line
bin = (line>0.5*amax(line))
labels,n = morph.label(bin)
sums = measurements.sum(bin,labels,range(n+1))
sums = sums[labels]
good = minimum(bin,1-(sums>0)*(sums<minsize))
return good
class MovingStats:
def __init__(self,n=100):
self.data = []
self.n = n
self.count = 0
def add(self,x):
self.data += [x]
self.data = self.data[-self.n:]
self.count += 1
def mean(self):
if len(self.data)==0: return nan
return mean(self.data)
|
apache-2.0
| -5,726,180,883,866,732,000
| 32.970883
| 127
| 0.583329
| false
| 3.571948
| false
| false
| false
|
tombusby/Log-Bitbucket-History
|
logger_html.py
|
1
|
2069
|
#!/usr/bin/env python
import feedparser, sys, hashlib, os
from lxml import etree
from StringIO import StringIO
from datetime import datetime
def get_log_file_location():
file_dir = os.path.dirname(os.path.realpath(__file__))
return os.path.join(file_dir, "work_log.html")
def make_table_header_row(table):
row = etree.SubElement(table, "tr")
etree.SubElement(row, "th").text = "Published"
etree.SubElement(row, "th").text = "Processed to Log"
etree.SubElement(row, "th").text = "Description"
def parse_entry_summary(entry):
tree = etree.parse(StringIO(entry["summary"]), parse_entry_summary.parser)
return tree.find(".//body").getchildren()
parse_entry_summary.parser = etree.HTMLParser()
def make_table_row(table, hash, entry):
row = etree.SubElement(table, "tr")
row.attrib["hash"] = hash
etree.SubElement(row, "td").text = entry["published"]
etree.SubElement(row, "td").text = datetime.today().isoformat()
summary = etree.SubElement(row, "td")
for element in parse_entry_summary(entry):
summary.append(element)
def get_existing_log():
try:
return etree.parse(get_log_file_location()).getroot()
except:
table = etree.Element("table")
table.attrib["border"] = "1"
table.attrib["style"] = "border-collapse: collapse;"
make_table_header_row(table)
return table
def get_existing_hashes(tree):
return tree.xpath(".//tr/@hash")
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: {} <user_id> <token>".format(sys.argv[0])
exit()
user_id, token = sys.argv[1:3]
table = get_existing_log()
hashes = get_existing_hashes(table)
feed_items = feedparser.parse("https://bitbucket.org/{}/rss/feed?token={}".format(user_id, token))
entries_for_user = filter(lambda e: user_id in e["title"], feed_items.entries)
for entry in sorted(entries_for_user, key=lambda k: k["published_parsed"]):
hash = hashlib.sha1(entry.published + entry.summary).hexdigest()
if hash not in hashes:
make_table_row(table, hash, entry)
with open(get_log_file_location(), "w+") as f:
f.write(etree.tostring(table, pretty_print=True))
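# Example invocation (user id and token below are placeholders):
#   ./logger_html.py my_bitbucket_user 0123456789abcdef
# Each run appends any new feed entries for that user to work_log.html next to
# this script, keyed by a SHA-1 hash of published date + summary to avoid duplicates.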
|
mit
| 1,247,881,574,972,655,900
| 31.84127
| 99
| 0.699372
| false
| 3.007267
| false
| false
| false
|
betur/btce-api
|
btceapi/keyhandler.py
|
1
|
2463
|
# Copyright (c) 2013 Alan McIntyre
import warnings
class KeyData(object):
def __init__(self, secret, nonce):
self.secret = secret
self.nonce = nonce
class KeyHandler(object):
'''KeyHandler handles the tedious task of managing nonces associated
with a BTC-e API key/secret pair.
The getNextNonce method is threadsafe, all others are not.'''
def __init__(self, filename=None, resaveOnDeletion=True):
'''The given file is assumed to be a text file with three lines
(key, secret, nonce) per entry.'''
if not resaveOnDeletion:
warnings.warn("The resaveOnDeletion argument to KeyHandler will"
" default to True in future versions.")
self._keys = {}
self.resaveOnDeletion = False
self.filename = filename
if filename is not None:
self.resaveOnDeletion = resaveOnDeletion
f = open(filename, "rt")
while True:
key = f.readline().strip()
if not key:
break
secret = f.readline().strip()
nonce = int(f.readline().strip())
self.addKey(key, secret, nonce)
def __del__(self):
self.close()
def close(self):
if self.resaveOnDeletion:
self.save(self.filename)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
def keys(self):
return self._keys.keys()
def getKeys(self):
return self._keys.keys()
def save(self, filename):
f = open(filename, "wt")
for k, data in self._keys.items():
f.write("%s\n%s\n%d\n" % (k, data.secret, data.nonce))
def addKey(self, key, secret, next_nonce):
self._keys[key] = KeyData(secret, next_nonce)
def getNextNonce(self, key):
data = self._keys.get(key)
if data is None:
raise KeyError("Key not found: %r" % key)
nonce = data.nonce
data.nonce += 1
return nonce
def getSecret(self, key):
data = self._keys.get(key)
if data is None:
raise KeyError("Key not found: %r" % key)
return data.secret
def setNextNonce(self, key, next_nonce):
data = self._keys.get(key)
if data is None:
raise KeyError("Key not found: %r" % key)
data.nonce = next_nonce
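# Usage sketch (illustrative; the key file path is hypothetical):
#   handler = KeyHandler("btce-keys.txt")   # text file: key, secret, nonce per entry
#   nonce = handler.getNextNonce(api_key)   # returns the stored nonce, then increments it
#   handler.close()                         # re-writes the file when resaveOnDeletion is True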
|
mit
| 1,563,550,528,482,904,300
| 27.976471
| 76
| 0.56151
| false
| 3.890995
| false
| false
| false
|
GLolol/lightdm-gtk-greeter-settings-deb
|
lightdm_gtk_greeter_settings/IndicatorPropertiesDialog.py
|
1
|
12548
|
#!/usr/bin/env python3
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# LightDM GTK Greeter Settings
# Copyright (C) 2014 Andrew P. <pan.pav.7c5@gmail.com>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from copy import deepcopy
from glob import iglob
from gi.repository import Gtk
from lightdm_gtk_greeter_settings import (
IconEntry,
OptionEntry)
from lightdm_gtk_greeter_settings.helpers import (
C_,
bool2string,
string2bool,
get_data_path,
get_greeter_version,
SimpleEnum,
WidgetsEnum,
WidgetsWrapper)
from lightdm_gtk_greeter_settings.IndicatorsEntry import (
EmptyIndicators,
Indicators,
LayoutSet,
Option)
__all__ = ['IndicatorPropertiesDialog']
class IndicatorPath(OptionEntry.StringPathEntry):
class Row(SimpleEnum):
Title = ()
Type = ()
Icon = ()
class IndicatorIconEntry(IconEntry.IconEntry):
DefaultValue = ()
def __init__(self, widgets):
self._label = widgets['label']
super().__init__(widgets)
def _set_value(self, value):
super()._set_value(self.DefaultValue if value is None else value)
self._label.set_markup(self._current_item.menuitem.get_label())
self._image.props.visible = value not in (None, self.DefaultValue)
def _get_value(self):
return super()._get_value() or None
def _get_items(self):
for item in super()._get_items():
yield item
yield -1, (self._update_default, self._ask_default)
def _update_default(self, value, just_label):
if just_label or value is not self.DefaultValue:
return C_('option-entry|indicators', 'Use default value...'), None
self._image.props.icon_name = ''
label = C_('option-entry|indicators', '<b>Using default value</b>')
return label, label
def _ask_default(self, oldvalue):
return self.DefaultValue
class IndicatorTypeEntry(OptionEntry.BaseEntry):
def __init__(self, widgets):
super().__init__(widgets)
self._types = widgets['types']
self._indicator_choice = widgets['indicator_choice']
self._spacer_choice = widgets['spacer_choice']
self._separator_choice = widgets['separator_choice']
self._types.connect('changed', self._emit_changed)
self._indicator_choice.connect('toggled', self._on_choice_changed, None,
(self._types, widgets['indicator_box']))
self._spacer_choice.connect('toggled', self._on_choice_changed, Indicators.Spacer)
self._separator_choice.connect('toggled', self._on_choice_changed, Indicators.Separator)
self._value = None
def add_type(self, name, title):
if name not in EmptyIndicators:
self._types.append(name, title or name)
def _get_value(self):
if self._indicator_choice.props.active:
return self._types.props.active_id
else:
return self._value
def _set_value(self, value):
if value == Indicators.Spacer:
button = self._spacer_choice
elif value == Indicators.Separator:
button = self._separator_choice
else:
button = self._indicator_choice
self._value = value
self._types.set_active_id(value)
if button.props.active:
button.toggled()
else:
button.props.active = True
def _on_choice_changed(self, button, value, widgets=[]):
for w in widgets:
w.props.sensitive = button.props.active
if button.props.active:
self._value = value if value else self._types.props.active_id
self._emit_changed()
class IndicatorPropertiesDialog(Gtk.Dialog):
__gtype_name__ = 'IndicatorPropertiesDialog'
class Widgets(WidgetsEnum):
add = 'add_button'
ok = 'ok_button'
infobar = 'infobar'
message = 'message'
common_options = 'common_options_box'
custom_options = 'custom_options_box'
path = 'option_path_combo'
path_model = 'option_path_model'
hide_disabled = 'option_power_hide_disabled'
def __new__(cls, *args, **kwargs):
builder = Gtk.Builder()
builder.add_from_file(get_data_path('%s.ui' % cls.__name__))
window = builder.get_object('indicator_properties_dialog')
window.builder = builder
builder.connect_signals(window)
window.init_window(*args, **kwargs)
return window
def init_window(self, is_duplicate=None, get_defaults=None, get_name=str):
self._widgets = self.Widgets(builder=self.builder)
self._get_defaults = get_defaults
self._add_indicator = None
self._is_duplicate = is_duplicate
self._get_name = get_name
self._indicator_loaded = False
self._name = None
self._reversed = False
self._name2page = {}
for i in range(0, self._widgets.custom_options.get_n_pages()):
page = self._widgets.custom_options.get_nth_page(i)
name = Gtk.Buildable.get_name(page)
self._name2page['~' + name.rsplit('_')[-1]] = i
if get_greeter_version() < 0x020100:
self._widgets.common_options.props.visible = False
self._name2page = {
Indicators.External: self._name2page[Indicators.External],
Indicators.Text: self._name2page[Indicators.Text]}
text_prefix = 'option_text_fallback'
else:
self._name2page[Indicators.Text] = -1
text_prefix = 'option_text'
self._option_type = IndicatorTypeEntry(WidgetsWrapper(self.builder, 'option_type'))
self._option_text = OptionEntry.StringEntry(WidgetsWrapper(self.builder, text_prefix))
self._option_image = IndicatorIconEntry(WidgetsWrapper(self.builder, 'option_image'))
self._option_path = IndicatorPath(WidgetsWrapper(self.builder, 'option_path'))
self._option_hide_disabled = \
OptionEntry.BooleanEntry(WidgetsWrapper(self.builder, 'option_hide_disabled'))
for entry in (self._option_type, self._option_path):
entry.changed.connect(self._on_option_changed)
for name in Indicators:
self._option_type.add_type(name, self._get_name(name))
# Hiding first column created by Gtk.ComboBoxText
self._widgets.path.get_cells()[0].props.visible = False
for path in sorted(iglob(os.path.join(sys.prefix, 'share', 'unity', 'indicators', '*'))):
name = os.path.basename(path)
parts = name.rsplit('.', maxsplit=1)
if len(parts) == 2 and parts[0] == 'com.canonical.indicator':
name = parts[1]
row = IndicatorPath.Row._make(Type=IndicatorPath.ItemType.Value,
Title=name,
Icon='application-x-executable')
self._widgets.path_model.append(row)
for path in sorted(iglob(os.path.join(sys.prefix, 'lib', 'indicators3', '7', '*.so'))):
row = IndicatorPath.Row._make(Type=IndicatorPath.ItemType.Value,
Title=os.path.basename(path),
Icon='application-x-executable')
self._widgets.path_model.append(row)
def _on_option_changed(self, entry=None):
if not self._indicator_loaded:
return
name = self._option_type.value
error = None
warning = None
if name == Indicators.External:
if not str(self._option_path.value).strip():
error = C_('option-entry|indicators', 'Path/Service field is not filled')
elif name != self._name:
if self._is_duplicate and self._is_duplicate(name):
warning = C_('option-entry|indicators',
'Indicator "{name}" is already in the list.\n'
'It will be overwritten.').format(name=self._get_name(name, name))
self._widgets.ok.props.sensitive = error is None
self._widgets.add.props.sensitive = error is None
self._widgets.infobar.props.visible = error or warning
self._widgets.message.props.label = error or warning
if error:
self._widgets.infobar.props.message_type = Gtk.MessageType.WARNING
elif warning:
self._widgets.infobar.props.message_type = Gtk.MessageType.INFO
else:
self._widgets.infobar.props.message_type = Gtk.MessageType.OTHER
def on_option_type_types_changed(self, combo):
current = self._widgets.custom_options.props.page
if current != -1:
self._widgets.custom_options.get_nth_page(current).props.visible = False
current = self._name2page.get(combo.props.active_id, -1)
if current != -1:
self._widgets.custom_options.get_nth_page(current).props.visible = True
self._widgets.custom_options.props.page = current
if self._indicator_loaded:
defaults = self._get_defaults(combo.props.active_id)
self._option_text.enabled = Option.Text in defaults
self._option_image.enabled = Option.Image in defaults
def on_add_clicked(self, widget):
self._add_callback(self.get_indicator())
self._options = deepcopy(self._options)
self._on_option_changed()
@property
def add_callback(self):
return self._add_callback
@add_callback.setter
def add_callback(self, value):
self._add_callback = value
self._widgets.add.props.visible = value is not None
def set_indicator(self, options):
self._indicator_loaded = False
self._options = deepcopy(options)
self._name = options[Option.Name]
self._option_type.value = options[Option.Name]
self._option_path.value = options.get(Option.Path)
self._option_text.value = options.get(Option.Text, '')
self._option_text.enabled = Option.Text in options
self._option_image.value = options.get(Option.Image)
self._option_image.enabled = Option.Image in options
self._reversed = Option.Layout in options and LayoutSet.Reversed in options[Option.Layout]
hide_disabled = options.get(Option.HideDisabled, bool2string(False))
self._option_hide_disabled.value = hide_disabled or bool2string(True)
self._indicator_loaded = True
self._on_option_changed()
def get_indicator(self):
options = self._options
name = self._option_type.value
options[Option.Name] = name
options[Option.Layout] = set()
if name not in EmptyIndicators:
if self._option_text.enabled:
options[Option.Text] = self._option_text.value or None
options[Option.Layout].add(LayoutSet.Text)
if self._option_image.enabled:
options[Option.Image] = self._option_image.value or None
options[Option.Layout].add(LayoutSet.Image)
if self._option_text.enabled and self._option_image.enabled and self._reversed:
options[Option.Layout].add(LayoutSet.Reversed)
if LayoutSet.Text not in options[Option.Layout] and Option.Text in options:
del options[Option.Text]
if LayoutSet.Image not in options[Option.Layout] and Option.Image in options:
del options[Option.Image]
if name == Indicators.External:
options[Option.Path] = self._option_path.value
else:
options.pop(Option.Path, None)
if name == Indicators.Power and string2bool(self._option_hide_disabled.value):
options[Option.HideDisabled] = None
elif Option.HideDisabled in options:
options.pop(Option.HideDisabled, None)
return options
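# Hedged usage sketch (editor addition): a rough outline of how the dialog is
# driven; the callbacks are placeholders and a working Gtk environment plus the
# accompanying .ui file are assumed. Response ids come from that .ui file.
def _example_dialog_roundtrip():
    dialog = IndicatorPropertiesDialog(is_duplicate=lambda name: False,
                                       get_defaults=lambda name: {},
                                       get_name=str)
    dialog.set_indicator({Option.Name: Indicators.Text, Option.Text: 'hello'})
    dialog.run()
    options = dialog.get_indicator()  # dict reflecting the edited widgets
    dialog.destroy()
    return options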
|
gpl-3.0
| -3,611,849,266,747,698,000
| 36.681682
| 98
| 0.618664
| false
| 3.937245
| false
| false
| false
|
kdheepak89/fono
|
fono/run.py
|
1
|
4290
|
#!/usr/bin/env python
"""Run module."""
import click
import data
import ReferenceModel
import solve
import version
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--folder', type=click.Path(), help='Path to data folder')
@click.option('--quantity', type=click.Path(), help='Path to quantity.csv file')
@click.option('--price', type=click.Path(), help='Path to price.csv file')
@click.option('--shipping', type=click.Path(), help='Path to shipping.csv file')
@click.option('--mipgap', type=click.FLOAT, default=0.001, help='Value of mipgap')
@click.option('--color', default='white', help='Color of solution (e.g. --color=red)')
@click.option('--fono-color', default='green', help='Color of solution (e.g. --fono-color=blue)')
@click.version_option(version.__version__, '-v', '--version')
def main(**kwargs):
"""'Find Optimal Number of Orders' aka fono."""
color = kwargs.pop('color')
fono_color = kwargs.pop('fono_color')
try:
if not any([kwargs[key] for key in kwargs]):
help_str = "{}".format(click.get_current_context().get_help())
click.secho(help_str)
click.get_current_context().exit()
def show_item(item):
if item is not None:
return item
click.echo("")
click.secho("Find the Optimal Number of Orders:", fg=fono_color, bold=True)
click.echo("")
with click.progressbar(
('Getting data', 'Creating model', 'Solving', 'Finished'),
label='fono:',
item_show_func=show_item) as bar:
for item in bar:
if item == 'Getting data':
if kwargs['folder']:
price, quantity, shipping = data.get_input(kwargs['folder'])
elif kwargs['quantity'] and kwargs['price'] and kwargs['shipping']:
quantity = data.get_quantity(kwargs['quantity'])
price = data.get_price(kwargs['price'])
shipping = data.get_shipping(kwargs['shipping'])
elif item == 'Creating model':
model = ReferenceModel.create_model(price, quantity, shipping)
elif item == 'Solving':
mipgap = kwargs.get('mipgap')
solve.solve_instance(model, mipgap=mipgap), model
# solve.display_results(solve.solve_instance(model), model)
click.echo("")
click.secho("fono results:", fg=fono_color, bold=True)
for website in sorted(model.Websites):
click.secho("")
click.secho("{}".format(website), fg=color, bold=True, nl=False)
click.secho(":")
for item in sorted(model.Items):
if model.Quantity[website, item].value > 0:
click.echo("Buy ", nl=False)
click.secho("{} ".format(int(model.Quantity[website, item].value)), fg=color, bold=True, nl=False)
click.echo("item(s) of ", nl=False)
click.secho("{} ".format(item), fg=color, bold=True, nl=False)
click.echo("for a total of ", nl=False)
click.secho("{} ".format(price[(website, item)] * model.Quantity[website, item].value),
fg=color,
bold=True,
nl=False)
click.echo("dollars", nl=False)
click.secho(".")
click.echo("")
item_costs = model.Cost['Item'].value
shipping_costs = model.Cost['Shipping'].value
total_costs = item_costs + shipping_costs
click.secho("Total product costs = {} dollars".format(item_costs), bold=True)
click.secho("Total shipping costs = {} dollars".format(shipping_costs), bold=True)
click.echo("")
click.secho("Total costs = {} dollars".format(total_costs), fg=fono_color, bold=True)
click.echo("")
except Exception as e:
click.echo('')
        raise click.ClickException("{}\n\nCheck the help (--help) on how to use fono or contact the developer.".format(
            str(e)))
if __name__ == '__main__':
main()
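# Hedged usage sketch (editor addition): exercising the CLI in-process with
# click's test runner; 'data/' is a placeholder folder assumed to contain the
# price/quantity/shipping CSV files that data.get_input() expects.
def _example_invoke_fono():
    from click.testing import CliRunner
    result = CliRunner().invoke(main, ['--folder', 'data/', '--mipgap', '0.01'])
    return result.output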
|
bsd-3-clause
| -4,993,770,995,784,116,000
| 41.058824
| 119
| 0.557576
| false
| 3.968548
| false
| false
| false
|
urschrei/simplification
|
benchmark_runner.py
|
1
|
1269
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Standalone benchmark runner
"""
import cProfile
import pstats
import profile
import numpy as np
print("Running Rust + Cython benchmarks")
# calibrate
pr = profile.Profile()
calibration = np.mean([pr.calibrate(100000) for x in range(5)])
# add the bias
profile.Profile.bias = calibration
with open("simplification/test/cprofile_rust_cython.py", "rb") as f1:
c1 = f1.read()
with open("simplification/test/cprofile_rust_cython_complex.py", "rb") as f2:
c2 = f2.read()
with open("simplification/test/cprofile_rust_cython_shapely.py", "rb") as f3:
c3 = f3.read()
cProfile.run(c1, "simplification/test/output_stats_rust_cython")
rust_cython = pstats.Stats("simplification/test/output_stats_rust_cython")
cProfile.run(c2, "simplification/test/output_stats_rust_cython_complex")
rust_cython_c = pstats.Stats("simplification/test/output_stats_rust_cython_complex")
cProfile.run(c3, "simplification/test/output_stats_rust_cython_shapely")
shapely = pstats.Stats("simplification/test/output_stats_rust_cython_shapely")
print("Rust Cython Benchmarks\n")
rust_cython.sort_stats("cumulative").print_stats(5)
rust_cython_c.sort_stats("cumulative").print_stats(5)
shapely.sort_stats("cumulative").print_stats(20)
|
mit
| -8,462,080,342,845,628,000
| 29.95122
| 84
| 0.746257
| false
| 2.890661
| true
| false
| false
|
corbinq27/priceTweeter
|
product_extractor.py
|
1
|
1197
|
__author__ = 'corbinq27'
import re
import json
import urllib2
#fairly specialized python script to extract prices from specific pages on wholesalegaming.biz
class ProductExtractor():
def __init__(self):
pass
def product_extractor(self):
the_magic_regex_string = '<tr bgcolor="#FFFFFF">\r\n <td align="left"><font color="black" face="Arial, Helvetica"'+ \
' size="2"><a CLASS="anylink" href="([^\"]+)">([^<]+)</a></font></td>'
list_of_urls = {}
with open("/tmp/hills_urls.json", "rb") as urls:
list_of_urls = json.loads(urls.read())
dict_of_pages_to_check = {"urls": []}
for each_page in list_of_urls["urls"]:
response = urllib2.urlopen(each_page)
page_source = response.read()
m = re.finditer(the_magic_regex_string, page_source)
for each_group in m:
url = "%s%s" % (each_page, each_group.group(1))
print url
dict_of_pages_to_check["urls"].append(url)
with open("/tmp/pages_to_check.json", "w") as fp:
json.dump(dict_of_pages_to_check, fp, sort_keys=True, indent=4)
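# Hedged usage sketch (editor addition): the extractor reads the hard-coded
# /tmp/hills_urls.json ({"urls": [...]}) and writes /tmp/pages_to_check.json,
# so driving it is a single call; the wrapper name is an editor placeholder.
def _example_run_extractor():
    ProductExtractor().product_extractor()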
|
mit
| -4,489,230,412,158,461,400
| 34.205882
| 126
| 0.555556
| false
| 3.449568
| false
| false
| false
|
portnov/sverchok
|
nodes/vector/formula_deform.py
|
1
|
2365
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from math import *
from bpy.props import StringProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (updateNode)
class SvFormulaDeformNode(bpy.types.Node, SverchCustomTreeNode):
''' Deform Verts by Math '''
bl_idname = 'SvFormulaDeformNode'
bl_label = 'Deform by formula'
bl_icon = 'OUTLINER_OB_EMPTY'
ModeX = StringProperty(name='formulaX', default='x', update=updateNode)
ModeY = StringProperty(name='formulaY', default='y', update=updateNode)
ModeZ = StringProperty(name='formulaZ', default='z', update=updateNode)
def sv_init(self, context):
self.inputs.new('VerticesSocket', 'Verts')
self.outputs.new('VerticesSocket', 'Verts')
def draw_buttons(self, context, layout):
for element in 'XYZ':
row = layout.row()
split = row.split(percentage=0.15)
split.label(element)
split.split().prop(self, "Mode"+element, text='')
def process(self):
Io = self.inputs[0]
Oo = self.outputs[0]
if Oo.is_linked:
out = []
V = Io.sv_get()
Value = "[("+self.ModeX+","+self.ModeY+","+self.ModeZ+") for (x, y, z),i in zip(L, I)]"
for L in V:
I = range(len(L))
out.append(eval(Value))
Oo.sv_set(out)
def update_socket(self, context):
self.update()
def register():
bpy.utils.register_class(SvFormulaDeformNode)
def unregister():
bpy.utils.unregister_class(SvFormulaDeformNode)
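# Hedged illustration (editor addition): what the string built in process()
# evaluates to for ModeX='x*2', ModeY='y', ModeZ='z+i' on a made-up vertex
# list; the numbers are placeholders, not Sverchok data.
def _example_formula_eval():
    L = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)]
    I = range(len(L))
    return [(x * 2, y, z + i) for (x, y, z), i in zip(L, I)]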
|
gpl-3.0
| 7,887,637,199,607,015,000
| 33.779412
| 99
| 0.654123
| false
| 3.599696
| false
| false
| false
|
bmerry/entropy
|
entropy/__init__.py
|
1
|
1900
|
# Entropy: pauses Rhythmbox when the play queue is finished
# Copyright (C) 2014 Bruce Merry <bmerry@users.sourceforge.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from gi.repository import GObject, RB, Peas
import gettext
gettext.install('rhythmbox', RB.locale_dir())
class EntropyPlugin(GObject.Object, Peas.Activatable):
object = GObject.property(type = GObject.Object)
def __init__(self):
super(EntropyPlugin, self).__init__()
def get_shell_player(self):
return self.object.props.shell_player
def song_changed(self, entry, user_data):
shell_player = self.get_shell_player()
if shell_player.props.playing and self.playing_from_queue and not shell_player.props.playing_from_queue:
shell_player.stop()
self.playing_from_queue = shell_player.props.playing_from_queue
def do_activate(self):
'''
Plugin activation
'''
shell_player = self.get_shell_player()
self.playing_from_queue = shell_player.props.playing_from_queue
self.song_changed_id = shell_player.connect('playing-song-changed', self.song_changed)
def do_deactivate(self):
shell_player = self.get_shell_player()
shell_player.disconnect(self.song_changed_id)
|
gpl-3.0
| -4,271,626,382,035,553,300
| 37.77551
| 112
| 0.71
| false
| 3.784861
| false
| false
| false
|
hale36/SRTV
|
sickbeard/providers/generic.py
|
1
|
26955
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import re
import itertools
import urllib
from random import shuffle
from base64 import b16encode, b32decode
import requests
from hachoir_parser import createParser
import sickbeard
from sickbeard import helpers, classes, logger, db
from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT
from sickbeard import tvcache
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard.common import Quality
from sickbeard.common import user_agents
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from sickbeard import show_name_helpers
class GenericProvider:
NZB = "nzb"
TORRENT = "torrent"
def __init__(self, name):
# these need to be set in the subclass
self.providerType = None
self.name = name
self.proxy = ProviderProxy()
self.proxyGlypeProxySSLwarning = None
self.urls = {}
self.url = ''
self.public = False
self.show = None
self.supportsBacklog = False
self.supportsAbsoluteNumbering = False
self.anime_only = False
self.search_mode = None
self.search_fallback = False
self.enabled = False
self.enable_daily = False
self.enable_backlog = False
self.cache = tvcache.TVCache(self)
self.session = requests.Session()
shuffle(user_agents)
self.headers = {'User-Agent': user_agents[0]}
self.btCacheURLS = [
'http://torcache.net/torrent/{torrent_hash}.torrent',
'http://thetorrent.org/torrent/{torrent_hash}.torrent',
'http://btdig.com/torrent/{torrent_hash}.torrent',
# 'http://torrage.com/torrent/{torrent_hash}.torrent',
# 'http://itorrents.org/torrent/{torrent_hash}.torrent',
]
shuffle(self.btCacheURLS)
self.proper_strings = ['PROPER|REPACK']
def getID(self):
return GenericProvider.makeID(self.name)
@staticmethod
def makeID(name):
return re.sub(r"[^\w\d_]", "_", name.strip().lower())
def imageName(self):
return self.getID() + '.png'
def _checkAuth(self):
return True
def _doLogin(self):
return True
def isActive(self):
if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
return self.isEnabled()
elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS:
return self.isEnabled()
else:
return False
def isEnabled(self):
"""
This should be overridden and should return the config setting eg. sickbeard.MYPROVIDER
"""
return False
def getResult(self, episodes):
"""
Returns a result of the correct type for this provider
"""
if self.providerType == GenericProvider.NZB:
result = classes.NZBSearchResult(episodes)
elif self.providerType == GenericProvider.TORRENT:
result = classes.TorrentSearchResult(episodes)
else:
result = classes.SearchResult(episodes)
result.provider = self
return result
def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
"""
By default this is just a simple urlopen call but this method should be overridden
for providers with special URL requirements (like cookies)
"""
if self.proxy.isEnabled():
self.headers.update({'Referer': self.proxy.getProxyURL()})
self.proxyGlypeProxySSLwarning = self.proxy.getProxyURL() + 'includes/process.php?action=sslagree&submit=Continue anyway...'
else:
if 'Referer' in self.headers:
self.headers.pop('Referer')
self.proxyGlypeProxySSLwarning = None
return helpers.getURL(self.proxy._buildURL(url), post_data=post_data, params=params, headers=self.headers, timeout=timeout,
session=self.session, json=json, proxyGlypeProxySSLwarning=self.proxyGlypeProxySSLwarning)
def _makeURL(self, result):
urls = []
filename = u''
if result.url.startswith('magnet'):
try:
torrent_hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0].upper()
try:
torrent_name = re.findall('dn=([^&]+)', result.url)[0]
except:
torrent_name = 'NO_DOWNLOAD_NAME'
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash)).upper()
if not torrent_hash:
logger.log("Unable to extract torrent hash from magnet: " + ex(result.url), logger.ERROR)
return urls, filename
urls = [x.format(torrent_hash=torrent_hash, torrent_name=torrent_name) for x in self.btCacheURLS]
except:
logger.log("Unable to extract torrent hash or name from magnet: " + ex(result.url), logger.ERROR)
return urls, filename
else:
urls = [result.url]
if self.providerType == GenericProvider.TORRENT:
filename = ek(os.path.join, sickbeard.TORRENT_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
elif self.providerType == GenericProvider.NZB:
filename = ek(os.path.join, sickbeard.NZB_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
return urls, filename
def downloadResult(self, result):
"""
Save the result to disk.
"""
# check for auth
if not self._doLogin():
return False
urls, filename = self._makeURL(result)
if self.proxy.isEnabled():
self.headers.update({'Referer': self.proxy.getProxyURL()})
elif 'Referer' in self.headers:
self.headers.pop('Referer')
for url in urls:
if 'NO_DOWNLOAD_NAME' in url:
continue
if not self.proxy.isEnabled() and url.startswith('http'):
# Let's just set a referer for every .torrent/.nzb, should work as a cover-all without side-effects
self.headers.update({'Referer': '/'.join(url.split('/')[:3]) + '/'})
logger.log(u"Downloading a result from " + self.name + " at " + url)
# Support for Jackett/TorzNab
if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
filename = filename.rsplit('.', 1)[0] + '.' + GenericProvider.TORRENT
if helpers.download_file(self.proxy._buildURL(url), filename, session=self.session, headers=self.headers):
if self._verify_download(filename):
logger.log(u"Saved result to " + filename, logger.INFO)
return True
else:
logger.log(u"Could not download %s" % url, logger.WARNING)
helpers._remove_file_failed(filename)
if len(urls):
logger.log(u"Failed to download any results", logger.WARNING)
return False
def _verify_download(self, file_name=None):
"""
Checks the saved file to see if it was actually valid, if not then consider the download a failure.
"""
# primitive verification of torrents, just make sure we didn't get a text file or something
if file_name.endswith(GenericProvider.TORRENT):
try:
parser = createParser(file_name)
if parser:
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)
logger.log(u"Result is not a valid torrent file", logger.DEBUG)
return False
return True
def searchRSS(self, episodes):
return self.cache.findNeededEpisodes(episodes)
def getQuality(self, item, anime=False):
"""
Figures out the quality of the given RSS item node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns a Quality value obtained from the node's data
"""
(title, url) = self._get_title_and_url(item)
quality = Quality.sceneQuality(title, anime)
return quality
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
return []
def _get_season_search_strings(self, episode):
return []
def _get_episode_search_strings(self, eb_obj, add_string=''):
return []
def _get_title_and_url(self, item):
"""
Retrieves the title and URL data from the item XML node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns: A tuple containing two strings representing title and URL respectively
"""
title = item.get('title', '')
if title:
title = u'' + title.replace(' ', '.')
url = item.get('link', '')
if url:
            url = url.replace('&amp;', '&').replace('%26tr%3D', '&tr=')
return title, url
def _get_size(self, item):
"""Gets the size from the item"""
logger.log(u"Provider type doesn't have _get_size() implemented yet", logger.ERROR)
return -1
def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
self._checkAuth()
self.show = show
results = {}
itemList = []
searched_scene_season = None
for epObj in episodes:
# search cache for episode result
cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
if cacheResult:
if epObj.episode not in results:
results[epObj.episode] = cacheResult
else:
results[epObj.episode].extend(cacheResult)
# found result, search next episode
continue
# skip if season already searched
if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
continue
# mark season searched for season pack searches so we can skip later on
searched_scene_season = epObj.scene_season
search_strings = []
if len(episodes) > 1 and search_mode == 'sponly':
# get season search results
search_strings = self._get_season_search_strings(epObj)
elif search_mode == 'eponly':
# get single episode search results
search_strings = self._get_episode_search_strings(epObj)
first = search_strings and isinstance(search_strings[0], dict) and 'rid' in search_strings[0]
if first:
logger.log(u'First search_string has rid', logger.DEBUG)
for curString in search_strings:
itemList += self._doSearch(curString, search_mode, len(episodes), epObj=epObj)
if first:
first = False
if itemList:
logger.log(u'First search_string had rid, and returned results, skipping query by string', logger.DEBUG)
break
else:
logger.log(u'First search_string had rid, but returned no results, searching with string query', logger.DEBUG)
# if we found what we needed already from cache then return results and exit
if len(results) == len(episodes):
return results
# sort list by quality
if len(itemList):
items = {}
itemsUnknown = []
for item in itemList:
quality = self.getQuality(item, anime=show.is_anime)
if quality == Quality.UNKNOWN:
itemsUnknown += [item]
else:
if quality not in items:
items[quality] = [item]
else:
items[quality].append(item)
itemList = list(itertools.chain(*[v for (k, v) in sorted(items.iteritems(), reverse=True)]))
itemList += itemsUnknown if itemsUnknown else []
# filter results
cl = []
for item in itemList:
(title, url) = self._get_title_and_url(item)
# parse the file name
try:
myParser = NameParser(False)
parse_result = myParser.parse(title)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
continue
except InvalidShowException:
logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
continue
showObj = parse_result.show
quality = parse_result.quality
release_group = parse_result.release_group
version = parse_result.version
addCacheEntry = False
if not (showObj.air_by_date or showObj.sports):
if search_mode == 'sponly':
if len(parse_result.episode_numbers):
logger.log(
u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
logger.DEBUG)
addCacheEntry = True
if len(parse_result.episode_numbers) and (
parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
ep.scene_episode in parse_result.episode_numbers]):
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
else:
if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
if not addCacheEntry:
# we just use the existing info for normal searches
actual_season = parse_result.season_number
actual_episodes = parse_result.episode_numbers
else:
if not parse_result.is_air_by_date:
logger.log(
u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
logger.DEBUG)
addCacheEntry = True
else:
airdate = parse_result.air_date.toordinal()
myDB = db.DBConnection()
sql_results = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
[showObj.indexerid, airdate])
if len(sql_results) != 1:
logger.log(
u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
logger.WARNING)
addCacheEntry = True
if not addCacheEntry:
actual_season = int(sql_results[0]["season"])
actual_episodes = [int(sql_results[0]["episode"])]
# add parsed result to cache for usage later on
if addCacheEntry:
logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
if ci is not None:
cl.append(ci)
continue
# make sure we want the episode
wantEp = True
for epNo in actual_episodes:
if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
wantEp = False
break
if not wantEp:
logger.log(
u"Ignoring result " + title + " because we don't want an episode that is " +
Quality.qualityStrings[
quality], logger.INFO)
continue
logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
# make a result object
epObj = []
for curEp in actual_episodes:
epObj.append(showObj.getEpisode(actual_season, curEp))
result = self.getResult(epObj)
result.show = showObj
result.url = url
result.name = title
result.quality = quality
result.release_group = release_group
result.version = version
result.content = None
result.size = self._get_size(item)
if len(epObj) == 1:
epNum = epObj[0].episode
logger.log(u"Single episode result.", logger.DEBUG)
elif len(epObj) > 1:
epNum = MULTI_EP_RESULT
logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
parse_result.episode_numbers), logger.DEBUG)
elif len(epObj) == 0:
epNum = SEASON_RESULT
logger.log(u"Separating full season result to check for later", logger.DEBUG)
if epNum not in results:
results[epNum] = [result]
else:
results[epNum].append(result)
# check if we have items to add to cache
if len(cl) > 0:
myDB = self.cache._getDB()
myDB.mass_action(cl)
return results
def findPropers(self, search_date=None):
results = self.cache.listPropers(search_date)
return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
results]
def seedRatio(self):
'''
Provider should override this value if custom seed ratio enabled
It should return the value of the provider seed ratio
'''
return ''
class NZBProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.NZB
def _get_size(self, item):
try:
size = item.get('links')[1].get('length', -1)
except IndexError:
size = -1
if not size:
logger.log(u"Size was not found in your provider response", logger.DEBUG)
return int(size)
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.TORRENT
def _get_title_and_url(self, item):
from feedparser.feedparser import FeedParserDict
if isinstance(item, (dict, FeedParserDict)):
title = item.get('title', '')
download_url = item.get('url', '')
if not download_url:
download_url = item.get('link', '')
elif isinstance(item, (list, tuple)) and len(item) > 1:
title = item[0]
download_url = item[1]
# Temp global block `DIAMOND` releases
if title.endswith('DIAMOND'):
logger.log(u'Skipping DIAMOND release for mass fake releases.')
title = download_url = u'FAKERELEASE'
if title:
title = self._clean_title_from_provider(title)
if download_url:
            download_url = download_url.replace('&amp;', '&')
return (title, download_url)
def _get_size(self, item):
size = -1
if isinstance(item, dict):
size = item.get('size', -1)
elif isinstance(item, (list, tuple)) and len(item) > 2:
size = item[2]
# Make sure we didn't select seeds/leechers by accident
if not size or size < 1024*1024:
size = -1
return size
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName.SXX
search_string['Season'].append(ep_string.encode('utf-8').strip())
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
ep_string = show_name + ' '
if ep_obj.show.air_by_date:
ep_string += str(ep_obj.airdate).replace('-', ' ')
elif ep_obj.show.sports:
ep_string += str(ep_obj.airdate).replace('-', ' ') + ('|', ' ')[len(self.proper_strings) > 1] + ep_obj.airdate.strftime('%b')
elif ep_obj.show.anime:
ep_string += "%02d" % int(ep_obj.scene_absolute_number)
else:
ep_string += sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
if add_string:
ep_string = ep_string + ' %s' % add_string
search_string['Episode'].append(ep_string.encode('utf-8').strip())
return [search_string]
def _clean_title_from_provider(self, title):
return (title or '').replace(' ', '.')
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST]) + ')'
)
for sqlshow in sqlResults or []:
show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if show:
curEp = show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
for term in self.proper_strings:
searchString = self._get_episode_search_strings(curEp, add_string=term)
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), show))
return results
class ProviderProxy:
def __init__(self):
self.Type = 'GlypeProxy'
self.param = 'browse.php?u='
self.option = '&b=32&f=norefer'
self.enabled = False
self.url = None
self.urls = {
'getprivate.eu (NL)': 'http://getprivate.eu/',
'hideme.nl (NL)': 'http://hideme.nl/',
'proxite.eu (DE)': 'http://proxite.eu/',
'interproxy.net (EU)': 'http://interproxy.net/',
}
def isEnabled(self):
""" Return True if we Choose to call TPB via Proxy """
return self.enabled
def getProxyURL(self):
""" Return the Proxy URL Choosen via Provider Setting """
return str(self.url)
def _buildURL(self, url):
""" Return the Proxyfied URL of the page """
if self.isEnabled():
url = self.getProxyURL() + self.param + urllib.quote_plus(url.encode('UTF-8')) + self.option
logger.log(u"Proxified URL: " + url, logger.DEBUG)
return url
def _buildRE(self, regx):
""" Return the Proxyfied RE string """
if self.isEnabled():
            regx = re.sub('//1', self.option, regx).replace('&', '&amp;')
logger.log(u"Proxified REGEX: " + regx, logger.DEBUG)
else:
regx = re.sub('//1', '', regx)
return regx
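# Hedged usage sketch (editor addition): the minimum a concrete torrent
# provider layers on top of TorrentProvider. The name, URL and enabled flag
# are placeholders; a real _doSearch would query the tracker and return items.
class ExampleTorrentProvider(TorrentProvider):
    def __init__(self):
        TorrentProvider.__init__(self, "Example")
        self.supportsBacklog = True
        self.urls = {'base_url': 'https://tracker.example/'}
        self.url = self.urls['base_url']
    def isEnabled(self):
        return self.enabled
    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
        # would normally fetch self.url with self.getURL() and parse items
        return []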
|
gpl-3.0
| -8,025,768,007,482,514,000
| 37.234043
| 189
| 0.555147
| false
| 4.321789
| false
| false
| false
|
codingforentrepreneurs/digital-marketplace
|
src/sellers/mixins.py
|
1
|
1520
|
import datetime
from django.db.models import Count, Min, Sum, Avg, Max
from billing.models import Transaction
from digitalmarket.mixins import LoginRequiredMixin
from products.models import Product
from .models import SellerAccount
class SellerAccountMixin(LoginRequiredMixin, object):
account = None
products = []
transactions = []
def get_account(self):
user = self.request.user
accounts = SellerAccount.objects.filter(user=user)
if accounts.exists() and accounts.count() == 1:
self.account = accounts.first()
return accounts.first()
return None
def get_products(self):
account = self.get_account()
products = Product.objects.filter(seller=account)
self.products = products
return products
def get_transactions(self):
products = self.get_products()
transactions = Transaction.objects.filter(product__in=products)
return transactions
def get_transactions_today(self):
today = datetime.date.today()
today_min = datetime.datetime.combine(today, datetime.time.min)
today_max = datetime.datetime.combine(today, datetime.time.max)
return self.get_transactions().filter(timestamp__range=(today_min, today_max))
def get_total_sales(self):
transactions = self.get_transactions().aggregate(Sum("price"), Avg("price"))
print transactions
total_sales = transactions["price__sum"]
return total_sales
def get_today_sales(self):
transactions = self.get_transactions_today().aggregate(Sum("price"))
total_sales = transactions["price__sum"]
return total_sales
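# Hedged usage sketch (editor addition): one plausible way to combine the
# mixin with a Django class-based view; the view class and template path are
# placeholders, not part of the original module.
from django.views.generic import TemplateView
class ExampleSellerDashboardView(SellerAccountMixin, TemplateView):
    template_name = "sellers/example_dashboard.html"
    def get_context_data(self, **kwargs):
        context = super(ExampleSellerDashboardView, self).get_context_data(**kwargs)
        context["products"] = self.get_products()
        context["total_sales"] = self.get_total_sales()
        return context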
|
mit
| 8,608,003,941,597,075,000
| 26.142857
| 80
| 0.748684
| false
| 3.559719
| false
| false
| false
|
atlasapi/atlas-deer
|
atlas-api/src/main/python/generate-load-test-urls.py
|
1
|
3768
|
#!/usr/bin/env python
# ./generate-load-test-urls.py --number-of-urls=100 --atlas-url=stage.atlas.metabroadcast.com --target-host=host-to-test --api-key=api-key --source=pressassociation.com --num-channels-source=100 --num-channels=10 --platform=hkyn --start-date=2015-02-01 --end-date=2015-02-10
import argparse
import datetime
import dateutil.parser
import httplib
import json
import random
import sys
arg_parser = argparse.ArgumentParser(description='Generate URL for load testing')
arg_parser.add_argument('--number-of-urls', required=True, dest='n', type=int, metavar='n', help='Number of url to generate')
arg_parser.add_argument('--atlas-url', required=True, dest='atlas_url', metavar='atlas_url', help='Atlas host')
arg_parser.add_argument('--target-host', required=True, dest='target_host', metavar='target_host', help='Target host')
arg_parser.add_argument('--api-key', required=True, dest='api_key', metavar='api_key', help='Atlas API key')
arg_parser.add_argument('--num-channels-source', required=True, type=int, dest='num_channels_source', metavar='num_channels_source', help='Number of channels to choose from')
arg_parser.add_argument('--num-channels', required=True, type=int, dest='num_channels', metavar='num_channels', help='Number of channels to use in request')
arg_parser.add_argument('--platform', required=True, dest='platform', metavar='platform', help='platform')
arg_parser.add_argument('--source', required=True, metavar='source', help='source of the schedules to bootstrap')
arg_parser.add_argument('--start-date', required=True, metavar='start_date', help='Start date')
arg_parser.add_argument('--end-date', required=True, metavar='end_date', help='Start date')
args = arg_parser.parse_args()
args.start_date = dateutil.parser.parse(args.start_date)
args.end_date = dateutil.parser.parse(args.end_date)
class Atlas:
def __init__(self, host, port):
self.host = host
self.port = port
def get(self, resource):
conn = httplib.HTTPConnection(self.host, self.port)
request = "GET http://%s:%s%s" % (self.host, self.port, resource)
conn.request('GET', resource)
resp = conn.getresponse()
if not resp.status == 200:
if resp.status == 400:
print "request failed for %s: %s" % (resource, resp.reason)
if resp.status == 404:
print "resource %s doesn't appear to exist" % (resource)
if resp.status >= 500:
print "problem with %s? %s %s" % (self.host, resp.status, resp.reason)
resp.read()
conn.close()
sys.exit()
body = resp.read()
try:
response = json.loads(body)
except Exception as e:
print "couldn't decode response to %s: %s" % (request, e)
print body
sys.exit()
return (request, response)
atlas = Atlas(args.atlas_url, 80)
req, platform = atlas.get("/4/channel_groups/%s.json?key=%s&annotations=channels" % (args.platform, args.api_key))
def get_days(start,end):
ds = []
cur = start
while cur <= end:
ds.append(cur)
cur = cur + datetime.timedelta(1)
return ds
channels = map((lambda c: c['channel']['id']),platform['channel_group']['channels'][:args.num_channels_source])
days = get_days(args.start_date, args.end_date)
for x in range(0, args.n):
channels_string = ",".join(random.sample(channels, args.num_channels))
day = random.choice(days)
print "/4/schedules.json?id=%s&annotations=channel,content_detail&from=%s&to=%s&key=%s&source=%s" % (
# args.target_host,
channels_string,
day.isoformat(),
(day + datetime.timedelta(1)).isoformat(),
args.api_key,
args.source
)
|
apache-2.0
| -617,069,677,216,576,400
| 41.818182
| 275
| 0.650743
| false
| 3.466421
| false
| false
| false
|
Rhoana/butterfly
|
bfly/CoreLayer/AccessLayer/Websocket.py
|
1
|
2717
|
import yaml
import json
import logging as log
import tornado.websocket
from QueryLayer import InfoQuery
from RequestHandler import RequestHandler
from NDStore import get_config
websockets = []
class Websocket(tornado.websocket.WebSocketHandler):
INPUT = RequestHandler.INPUT
RUNTIME = RequestHandler.RUNTIME
OUTPUT = RequestHandler.OUTPUT
OPEN_API = [
'token',
'channel',
]
def initialize(self, _core, _db, _config, _root=''):
        self.core = _core
self.BFLY_CONFIG = _config
# Get keys for interface
error_key = self.RUNTIME.IMAGE.ERROR.NAME
format_key = self.INPUT.INFO.FORMAT.NAME
method_key = self.INPUT.METHODS.NAME
        # Initialize an empty query
self.query = InfoQuery(**{
method_key: 'websocket:restore',
format_key: 'json',
error_key: '',
})
def check_origin(self, origin):
# Allow anyone to send messages
return True
def open(self, request, **kwargs):
# Get the path keywords
args = request.split('/')
keywords = dict(zip(self.OPEN_API, args))
# Get path information from token
config = get_config(self.BFLY_CONFIG, keywords, True)
# Update the query with the parameters
self.query.update_keys(config)
# Get message from the core
content = self.core.get_info(self.query)
# Send welcome only via this websocket
self.write_message(content)
# Add to list
if self not in websockets:
websockets.append(self)
def on_close(self):
# Remove from list
if self in websockets:
websockets.remove(self)
def on_message(self, json_msg):
# Interpret the message
message = json.loads(json_msg)
# Get keys for interface
method_key = self.INPUT.METHODS.NAME
error_key = self.RUNTIME.IMAGE.ERROR.NAME
# Get current method
action_val = message.get('action', '')
method_val = 'websocket:{}'.format(action_val)
# Set the action from the message
self.query.update_keys({
method_key: method_val,
error_key: '',
})
# Log request
log_msg = {'Incoming Message': message}
log.warning(yaml.safe_dump(log_msg))
# Get reply from the core
reply = self.core.get_edits(self.query, message)
self.send(reply)
def send(self, message):
# Log response
log_msg = """Outgoing Broadcast:
{}""".format(message)
log.warning(log_msg)
# Send to all in list
for ws in websockets:
ws.write_message(message)
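# Hedged usage sketch (editor addition): wiring the handler into a Tornado
# application; the core/db/config objects and the URL pattern are placeholders
# that would normally come from bfly's own startup code.
def _example_make_app(core, db, config):
    import tornado.web
    return tornado.web.Application([
        (r'/ws/(.*)', Websocket, dict(_core=core, _db=db, _config=config)),
    ])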
|
mit
| 5,140,380,614,656,622,000
| 28.532609
| 61
| 0.596982
| false
| 4.061286
| true
| false
| false
|
kronenpj/python-for-android
|
ci/constants.py
|
1
|
2655
|
from enum import Enum
class TargetPython(Enum):
python2 = 0
python3crystax = 1
python3 = 2
# recipes that currently break the build
# a recipe could be broken for a target Python and not for the other,
# hence we're maintaining one list per Python target
BROKEN_RECIPES_PYTHON2 = set([
# pythonhelpers.h:12:18: fatal error: string: No such file or directory
'atom',
# https://github.com/kivy/python-for-android/issues/550
'audiostream',
'brokenrecipe',
'evdev',
# distutils.errors.DistutilsError
# Could not find suitable distribution for Requirement.parse('cython')
'ffpyplayer',
'flask',
'groestlcoin_hash',
'hostpython3crystax',
# https://github.com/kivy/python-for-android/issues/1354
'kiwisolver',
'libmysqlclient',
'libsecp256k1',
'libtribler',
'ndghttpsclient',
'm2crypto',
# ImportError: No module named setuptools
'netifaces',
'Pillow',
# depends on cffi that still seems to have compilation issues
'protobuf_cpp',
'xeddsa',
'x3dh',
'pynacl',
'doubleratchet',
'omemo',
# requires `libpq-dev` system dependency e.g. for `pg_config` binary
'psycopg2',
# most likely some setup in the Docker container, because it works in host
'pyjnius', 'pyopenal',
'pyproj',
'pysdl2',
'pyzmq',
'secp256k1',
'shapely',
# mpmath package with a version >= 0.19 required
'sympy',
'twisted',
'vlc',
'websocket-client',
'zeroconf',
'zope',
'matplotlib', # https://github.com/kivy/python-for-android/issues/1900
])
BROKEN_RECIPES_PYTHON3 = set([
'brokenrecipe',
# enum34 is not compatible with Python 3.6 standard library
# https://stackoverflow.com/a/45716067/185510
'enum34',
# build_dir = glob.glob('build/lib.*')[0]
# IndexError: list index out of range
'secp256k1',
'ffpyplayer',
'icu',
# requires `libpq-dev` system dependency e.g. for `pg_config` binary
'psycopg2',
'protobuf_cpp',
# most likely some setup in the Docker container, because it works in host
'pyjnius', 'pyopenal',
# SyntaxError: invalid syntax (Python2)
'storm',
# mpmath package with a version >= 0.19 required
'sympy',
'vlc',
'matplotlib', # https://github.com/kivy/python-for-android/issues/1900
])
BROKEN_RECIPES = {
TargetPython.python2: BROKEN_RECIPES_PYTHON2,
TargetPython.python3: BROKEN_RECIPES_PYTHON3,
}
# recipes that were already built will be skipped
CORE_RECIPES = set([
'pyjnius', 'kivy', 'openssl', 'requests', 'sqlite3', 'setuptools',
'numpy', 'android', 'python2', 'python3',
])
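# Hedged usage sketch (editor addition): how a CI script might consult these
# tables; the recipe name is a placeholder and python3crystax simply has no
# broken-recipe list here, hence the .get() fallback.
def _example_should_build(recipe_name, target=TargetPython.python3):
    return (recipe_name not in BROKEN_RECIPES.get(target, set()) and
            recipe_name not in CORE_RECIPES)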
|
mit
| 1,589,547,520,252,471,800
| 27.858696
| 78
| 0.652354
| false
| 3.310474
| false
| false
| false
|
NarlikarLab/DIVERSITY
|
plotFigures.py
|
1
|
2017
|
##################### DIVERSITY #####################
# DIVERSITY is a tool to explore multiple ways of protein-DNA
# binding in the genome. More information can be found in the README file.
# Copyright (C) 2015 Sneha Mitra, Anushua Biswas and Leelavati Narlikar
# DIVERSITY is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# DIVERSITY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
######################################################
# Plot likelihood values
import os
from config import *
# plot a single likelihood file
def plotSingleFile(d, dirname):
f1 = dirname + "/" + likelihoodFile
f2 = dirname + "/" + likelihoodPlotFile
os.system("gnuplot -e 'filename=\"" + f1 + "\"; var=\"" + f2 + "\"' " + d['-v'])
os.system("rm " + f1)
# plot likelihood for all modes in different files
def plotLikelihood(d):
for i in range(d['-minMode'], d['-maxMode'] + 1):
f1 = d['-o'][1] + "/" + modeDir.format(str(i)) + "/" + likelihoodFile
f2 = d['-o'][1] + "/" + modeDir.format(str(i)) + "/" + likelihoodPlotFile
os.system("gnuplot -e 'filename=\"" + f1 + "\"; var=\"" + f2 + "\"' " + d['-v'])
os.system("rm " + f1)
def plotLikelihoodMode(d, mode):
f1 = d['-o'][1] + "/" + modeDir.format(str(mode)) + "/" + likelihoodFile
f2 = d['-o'][1] + "/" + modeDir.format(str(mode)) + "/" + likelihoodPlotFile
os.system("gnuplot -e 'filename=\"" + f1 + "\"; var=\"" + f2 + "\"' " + d['-v'])
os.system("rm " + f1)
|
gpl-3.0
| 5,421,648,628,846,587,000
| 41.914894
| 88
| 0.597422
| false
| 3.344942
| false
| false
| false
|
adzanette/scf-extractor
|
scf-extractor/lib/peewee.py
|
1
|
67648
|
# (\
# ( \ /(o)\ caw!
# ( \/ ()/ /)
# ( `;.))'".)
# `(/////.-'
# =====))=))===()
# ///'
# //
# '
from __future__ import with_statement
import datetime
import decimal
import logging
import operator
import os
import re
import threading
import time
from collections import deque, namedtuple
from copy import deepcopy
__all__ = [
'IntegerField', 'BigIntegerField', 'PrimaryKeyField', 'FloatField', 'DoubleField',
'DecimalField', 'CharField', 'TextField', 'DateTimeField', 'DateField', 'TimeField',
'BooleanField', 'ForeignKeyField', 'Model', 'DoesNotExist', 'ImproperlyConfigured',
'DQ', 'fn', 'SqliteDatabase', 'MySQLDatabase', 'PostgresqlDatabase', 'Field',
'JOIN_LEFT_OUTER', 'JOIN_INNER', 'JOIN_FULL',
]
try:
import sqlite3
except ImportError:
sqlite3 = None
try:
import psycopg2
except ImportError:
psycopg2 = None
try:
import MySQLdb as mysql
except ImportError:
try:
import pymysql as mysql
except ImportError:
mysql = None
class ImproperlyConfigured(Exception):
pass
if sqlite3 is None and psycopg2 is None and mysql is None:
raise ImproperlyConfigured('Either sqlite3, psycopg2 or MySQLdb must be installed')
if sqlite3:
sqlite3.register_adapter(decimal.Decimal, str)
sqlite3.register_adapter(datetime.date, str)
sqlite3.register_adapter(datetime.time, str)
sqlite3.register_converter('decimal', lambda v: decimal.Decimal(v))
if psycopg2:
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
logger = logging.getLogger('peewee')
OP_AND = 0
OP_OR = 1
OP_ADD = 10
OP_SUB = 11
OP_MUL = 12
OP_DIV = 13
OP_AND = 14
OP_OR = 15
OP_XOR = 16
OP_USER = 19
OP_EQ = 20
OP_LT = 21
OP_LTE = 22
OP_GT = 23
OP_GTE = 24
OP_NE = 25
OP_IN = 26
OP_IS = 27
OP_LIKE = 28
OP_ILIKE = 29
DJANGO_MAP = {
'eq': OP_EQ,
'lt': OP_LT,
'lte': OP_LTE,
'gt': OP_GT,
'gte': OP_GTE,
'ne': OP_NE,
'in': OP_IN,
'is': OP_IS,
'like': OP_LIKE,
'ilike': OP_ILIKE,
}
JOIN_INNER = 1
JOIN_LEFT_OUTER = 2
JOIN_FULL = 3
def dict_update(orig, extra):
new = {}
new.update(orig)
new.update(extra)
return new
class Leaf(object):
def __init__(self):
self.negated = False
self._alias = None
def __invert__(self):
self.negated = not self.negated
return self
def alias(self, a):
self._alias = a
return self
def asc(self):
return Ordering(self, True)
def desc(self):
return Ordering(self, False)
def _e(op, inv=False):
def inner(self, rhs):
if inv:
return Expr(rhs, op, self)
return Expr(self, op, rhs)
return inner
__and__ = _e(OP_AND)
__or__ = _e(OP_OR)
__add__ = _e(OP_ADD)
__sub__ = _e(OP_SUB)
__mul__ = _e(OP_MUL)
__div__ = _e(OP_DIV)
__xor__ = _e(OP_XOR)
__radd__ = _e(OP_ADD, inv=True)
__rsub__ = _e(OP_SUB, inv=True)
__rmul__ = _e(OP_MUL, inv=True)
__rdiv__ = _e(OP_DIV, inv=True)
__rand__ = _e(OP_AND, inv=True)
__ror__ = _e(OP_OR, inv=True)
__rxor__ = _e(OP_XOR, inv=True)
__eq__ = _e(OP_EQ)
__lt__ = _e(OP_LT)
__le__ = _e(OP_LTE)
__gt__ = _e(OP_GT)
__ge__ = _e(OP_GTE)
__ne__ = _e(OP_NE)
__lshift__ = _e(OP_IN)
__rshift__ = _e(OP_IS)
__mod__ = _e(OP_LIKE)
__pow__ = _e(OP_ILIKE)
class Expr(Leaf):
def __init__(self, lhs, op, rhs, negated=False):
super(Expr, self).__init__()
self.lhs = lhs
self.op = op
self.rhs = rhs
self.negated = negated
def clone(self):
return Expr(self.lhs, self.op, self.rhs, self.negated)
class DQ(Leaf):
def __init__(self, **query):
super(DQ, self).__init__()
self.query = query
def clone(self):
return DQ(**self.query)
class Param(Leaf):
def __init__(self, data):
self.data = data
super(Param, self).__init__()
class Func(Leaf):
def __init__(self, name, *params):
self.name = name
self.params = params
super(Func, self).__init__()
def clone(self):
return Func(self.name, *self.params)
def __getattr__(self, attr):
def dec(*args, **kwargs):
return Func(attr, *args, **kwargs)
return dec
fn = Func(None)
class FieldDescriptor(object):
def __init__(self, field):
self.field = field
self.att_name = self.field.name
def __get__(self, instance, instance_type=None):
if instance:
return instance._data.get(self.att_name)
return self.field
def __set__(self, instance, value):
instance._data[self.att_name] = value
Ordering = namedtuple('Ordering', ('param', 'asc'))
R = namedtuple('R', ('value',))
class Field(Leaf):
_field_counter = 0
_order = 0
db_field = 'unknown'
template = '%(column_type)s'
def __init__(self, null=False, index=False, unique=False, verbose_name=None,
help_text=None, db_column=None, default=None, choices=None,
primary_key=False, sequence=None, *args, **kwargs):
self.null = null
self.index = index
self.unique = unique
self.verbose_name = verbose_name
self.help_text = help_text
self.db_column = db_column
self.default = default
self.choices = choices
self.primary_key = primary_key
self.sequence = sequence
self.attributes = self.field_attributes()
self.attributes.update(kwargs)
Field._field_counter += 1
self._order = Field._field_counter
super(Field, self).__init__()
def add_to_class(self, model_class, name):
self.name = name
self.model_class = model_class
self.db_column = self.db_column or self.name
self.verbose_name = self.verbose_name or re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
setattr(model_class, name, FieldDescriptor(self))
def field_attributes(self):
return {}
def get_db_field(self):
return self.db_field
def coerce(self, value):
return value
def db_value(self, value):
return value if value is None else self.coerce(value)
def python_value(self, value):
return value if value is None else self.coerce(value)
class IntegerField(Field):
db_field = 'int'
def coerce(self, value):
return int(value)
class BigIntegerField(IntegerField):
db_field = 'bigint'
class PrimaryKeyField(IntegerField):
db_field = 'primary_key'
def __init__(self, *args, **kwargs):
kwargs['primary_key'] = True
super(PrimaryKeyField, self).__init__(*args, **kwargs)
class FloatField(Field):
db_field = 'float'
def coerce(self, value):
return float(value)
class DoubleField(FloatField):
db_field = 'double'
class DecimalField(Field):
db_field = 'decimal'
template = '%(column_type)s(%(max_digits)d, %(decimal_places)d)'
def field_attributes(self):
return {
'max_digits': 10,
'decimal_places': 5,
'auto_round': False,
'rounding': decimal.DefaultContext.rounding,
}
def db_value(self, value):
D = decimal.Decimal
if not value:
return value if value is None else D(0)
if self.attributes['auto_round']:
exp = D(10)**(-self.attributes['decimal_places'])
return D(str(value)).quantize(exp, rounding=self.attributes['rounding'])
return value
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(str(value))
def format_unicode(s, encoding='utf-8'):
if isinstance(s, unicode):
return s
elif isinstance(s, basestring):
return s.decode(encoding)
elif hasattr(s, '__unicode__'):
return s.__unicode__()
else:
return unicode(bytes(s), encoding)
class CharField(Field):
db_field = 'string'
template = '%(column_type)s(%(max_length)s)'
def field_attributes(self):
return {'max_length': 255}
def coerce(self, value):
value = format_unicode(value or '')
return value[:self.attributes['max_length']]
class TextField(Field):
db_field = 'text'
def coerce(self, value):
return format_unicode(value or '')
def format_date_time(value, formats, post_process=None):
post_process = post_process or (lambda x: x)
for fmt in formats:
try:
return post_process(datetime.datetime.strptime(value, fmt))
except ValueError:
pass
return value
class DateTimeField(Field):
db_field = 'datetime'
def field_attributes(self):
return {
'formats': [
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d',
]
}
def python_value(self, value):
if value and isinstance(value, basestring):
return format_date_time(value, self.attributes['formats'])
return value
class DateField(Field):
db_field = 'date'
def field_attributes(self):
return {
'formats': [
'%Y-%m-%d',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
]
}
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.date()
return format_date_time(value, self.attributes['formats'], pp)
elif value and isinstance(value, datetime.datetime):
return value.date()
return value
class TimeField(Field):
db_field = 'time'
def field_attributes(self):
return {
'formats': [
'%H:%M:%S.%f',
'%H:%M:%S',
'%H:%M',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
]
}
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.time()
return format_date_time(value, self.attributes['formats'], pp)
elif value and isinstance(value, datetime.datetime):
return value.time()
return value
class BooleanField(Field):
db_field = 'bool'
def coerce(self, value):
return bool(value)
class RelationDescriptor(FieldDescriptor):
def __init__(self, field, rel_model):
self.rel_model = rel_model
super(RelationDescriptor, self).__init__(field)
def get_object_or_id(self, instance):
rel_id = instance._data.get(self.att_name)
if rel_id is not None or self.att_name in instance._obj_cache:
if self.att_name not in instance._obj_cache:
obj = self.rel_model.get(self.rel_model._meta.primary_key==rel_id)
instance._obj_cache[self.att_name] = obj
return instance._obj_cache[self.att_name]
elif not self.field.null:
raise self.rel_model.DoesNotExist
return rel_id
def __get__(self, instance, instance_type=None):
if instance:
return self.get_object_or_id(instance)
return self.field
def __set__(self, instance, value):
if isinstance(value, self.rel_model):
instance._data[self.att_name] = value.get_id()
instance._obj_cache[self.att_name] = value
else:
instance._data[self.att_name] = value
class ReverseRelationDescriptor(object):
def __init__(self, field):
self.field = field
self.rel_model = field.model_class
def __get__(self, instance, instance_type=None):
if instance:
return self.rel_model.select().where(self.field==instance.get_id())
return self
class ForeignKeyField(IntegerField):
def __init__(self, rel_model, null=False, related_name=None, cascade=False, extra=None, *args, **kwargs):
self.rel_model = rel_model
self._related_name = related_name
self.cascade = cascade
self.extra = extra
kwargs.update(dict(
cascade='ON DELETE CASCADE' if self.cascade else '',
extra=extra or '',
))
super(ForeignKeyField, self).__init__(null=null, *args, **kwargs)
def add_to_class(self, model_class, name):
self.name = name
self.model_class = model_class
self.db_column = self.db_column or '%s_id' % self.name
self.verbose_name = self.verbose_name or re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
self.related_name = self._related_name or '%s_set' % (model_class._meta.name)
if self.rel_model == 'self':
self.rel_model = self.model_class
if self.related_name in self.rel_model._meta.fields:
raise AttributeError('Foreign key: %s.%s related name "%s" collision with field of same name' % (
self.model_class._meta.name, self.name, self.related_name))
setattr(model_class, name, RelationDescriptor(self, self.rel_model))
setattr(self.rel_model, self.related_name, ReverseRelationDescriptor(self))
model_class._meta.rel[self.name] = self
self.rel_model._meta.reverse_rel[self.related_name] = self
def get_db_field(self):
to_pk = self.rel_model._meta.primary_key
if not isinstance(to_pk, PrimaryKeyField):
return to_pk.get_db_field()
return super(ForeignKeyField, self).get_db_field()
def coerce(self, value):
return self.rel_model._meta.primary_key.coerce(value)
def db_value(self, value):
if isinstance(value, self.rel_model):
value = value.get_id()
return self.rel_model._meta.primary_key.db_value(value)
class QueryCompiler(object):
field_map = {
'int': 'INTEGER',
'bigint': 'INTEGER',
'float': 'REAL',
'double': 'REAL',
'decimal': 'DECIMAL',
'string': 'VARCHAR',
'text': 'TEXT',
'datetime': 'DATETIME',
'date': 'DATE',
'time': 'TIME',
'bool': 'SMALLINT',
'primary_key': 'INTEGER',
}
op_map = {
OP_EQ: '=',
OP_LT: '<',
OP_LTE: '<=',
OP_GT: '>',
OP_GTE: '>=',
OP_NE: '!=',
OP_IN: 'IN',
OP_IS: 'IS',
OP_LIKE: 'LIKE',
OP_ILIKE: 'ILIKE',
OP_ADD: '+',
OP_SUB: '-',
OP_MUL: '*',
OP_DIV: '/',
OP_XOR: '^',
OP_AND: 'AND',
OP_OR: 'OR',
}
join_map = {
JOIN_INNER: 'INNER',
JOIN_LEFT_OUTER: 'LEFT OUTER',
JOIN_FULL: 'FULL',
}
def __init__(self, quote_char='"', interpolation='?', field_overrides=None,
op_overrides=None):
self.quote_char = quote_char
self.interpolation = interpolation
self._field_map = dict_update(self.field_map, field_overrides or {})
self._op_map = dict_update(self.op_map, op_overrides or {})
def quote(self, s):
return ''.join((self.quote_char, s, self.quote_char))
def get_field(self, f):
return self._field_map[f]
def get_op(self, q):
return self._op_map[q]
def _max_alias(self, am):
max_alias = 0
if am:
for a in am.values():
i = int(a.lstrip('t'))
if i > max_alias:
max_alias = i
return max_alias + 1
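# parse_expr recursively renders one expression node (Expr, Field, Func, Param,
# Ordering, R, nested SelectQuery, list/tuple or Model instance) into a SQL
# fragment plus the flat list of bind parameters it contributes.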
def parse_expr(self, expr, alias_map=None):
s = self.interpolation
p = [expr]
if isinstance(expr, Expr):
lhs, lparams = self.parse_expr(expr.lhs, alias_map)
rhs, rparams = self.parse_expr(expr.rhs, alias_map)
s = '(%s %s %s)' % (lhs, self.get_op(expr.op), rhs)
p = lparams + rparams
elif isinstance(expr, Field):
s = self.quote(expr.db_column)
if alias_map and expr.model_class in alias_map:
s = '.'.join((alias_map[expr.model_class], s))
p = []
elif isinstance(expr, Func):
p = []
exprs = []
for param in expr.params:
parsed, params = self.parse_expr(param, alias_map)
exprs.append(parsed)
p.extend(params)
s = '%s(%s)' % (expr.name, ', '.join(exprs))
elif isinstance(expr, Param):
s = self.interpolation
p = [expr.data]
elif isinstance(expr, Ordering):
s, p = self.parse_expr(expr.param, alias_map)
s += ' ASC' if expr.asc else ' DESC'
elif isinstance(expr, R):
s = expr.value
p = []
elif isinstance(expr, SelectQuery):
max_alias = self._max_alias(alias_map)
clone = expr.clone()
if not expr._explicit_selection:
clone._select = (clone.model_class._meta.primary_key,)
subselect, p = self.parse_select_query(clone, max_alias, alias_map)
s = '(%s)' % subselect
elif isinstance(expr, (list, tuple)):
exprs = []
p = []
for i in expr:
e, v = self.parse_expr(i, alias_map)
exprs.append(e)
p.extend(v)
s = '(%s)' % ','.join(exprs)
elif isinstance(expr, Model):
s = self.interpolation
p = [expr.get_id()]
if isinstance(expr, Leaf):
if expr.negated:
s = 'NOT %s' % s
if expr._alias:
s = ' '.join((s, 'AS', expr._alias))
return s, p
def parse_query_node(self, qnode, alias_map):
if qnode is not None:
return self.parse_expr(qnode, alias_map)
return '', []
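# parse_joins walks the query's join adjacency graph from the root model,
# emitting one "<type> JOIN <table> AS <alias> ON lhs = rhs" clause per edge.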
def parse_joins(self, joins, model_class, alias_map):
parsed = []
seen = set()
def _traverse(curr):
if curr not in joins or curr in seen:
return
seen.add(curr)
for join in joins[curr]:
from_model = curr
to_model = join.model_class
field = from_model._meta.rel_for_model(to_model, join.on)
if field:
left_field = field.db_column
right_field = to_model._meta.primary_key.db_column
else:
field = to_model._meta.rel_for_model(from_model, join.on)
left_field = from_model._meta.primary_key.db_column
right_field = field.db_column
join_type = join.join_type or JOIN_INNER
lhs = '%s.%s' % (alias_map[from_model], self.quote(left_field))
rhs = '%s.%s' % (alias_map[to_model], self.quote(right_field))
parsed.append('%s JOIN %s AS %s ON %s = %s' % (
self.join_map[join_type],
self.quote(to_model._meta.db_table),
alias_map[to_model],
lhs,
rhs,
))
_traverse(to_model)
_traverse(model_class)
return parsed
def parse_expr_list(self, s, alias_map):
parsed = []
data = []
for expr in s:
expr_str, vars = self.parse_expr(expr, alias_map)
parsed.append(expr_str)
data.extend(vars)
return ', '.join(parsed), data
def calculate_alias_map(self, query, start=1):
alias_map = {query.model_class: 't%s' % start}
for model, joins in query._joins.items():
if model not in alias_map:
start += 1
alias_map[model] = 't%s' % start
for join in joins:
if join.model_class not in alias_map:
start += 1
alias_map[join.model_class] = 't%s' % start
return alias_map
def parse_select_query(self, query, start=1, alias_map=None):
model = query.model_class
db = model._meta.database
alias_map = alias_map or {}
alias_map.update(self.calculate_alias_map(query, start))
parts = ['SELECT']
params = []
if query._distinct:
parts.append('DISTINCT')
selection = query._select
select, s_params = self.parse_expr_list(selection, alias_map)
parts.append(select)
params.extend(s_params)
parts.append('FROM %s AS %s' % (self.quote(model._meta.db_table), alias_map[model]))
joins = self.parse_joins(query._joins, query.model_class, alias_map)
if joins:
parts.append(' '.join(joins))
where, w_params = self.parse_query_node(query._where, alias_map)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
if query._group_by:
group_by, g_params = self.parse_expr_list(query._group_by, alias_map)
parts.append('GROUP BY %s' % group_by)
params.extend(g_params)
if query._having:
having, h_params = self.parse_query_node(query._having, alias_map)
parts.append('HAVING %s' % having)
params.extend(h_params)
if query._order_by:
order_by, _ = self.parse_expr_list(query._order_by, alias_map)
parts.append('ORDER BY %s' % order_by)
if query._limit or (query._offset and not db.empty_limit):
limit = query._limit or -1
parts.append('LIMIT %s' % limit)
if query._offset:
parts.append('OFFSET %s' % query._offset)
if query._for_update:
parts.append('FOR UPDATE')
return ' '.join(parts), params
def _parse_field_dictionary(self, d):
sets, params = [], []
for field, expr in d.items():
field_str, _ = self.parse_expr(field)
val_str, val_params = self.parse_expr(expr)
val_params = [field.db_value(vp) for vp in val_params]
sets.append((field_str, val_str))
params.extend(val_params)
return sets, params
def parse_update_query(self, query):
model = query.model_class
parts = ['UPDATE %s SET' % self.quote(model._meta.db_table)]
sets, params = self._parse_field_dictionary(query._update)
parts.append(', '.join('%s=%s' % (f, v) for f, v in sets))
where, w_params = self.parse_query_node(query._where, None)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
return ' '.join(parts), params
def parse_insert_query(self, query):
model = query.model_class
parts = ['INSERT INTO %s' % self.quote(model._meta.db_table)]
sets, params = self._parse_field_dictionary(query._insert)
parts.append('(%s)' % ', '.join(s[0] for s in sets))
parts.append('VALUES (%s)' % ', '.join(s[1] for s in sets))
return ' '.join(parts), params
def parse_delete_query(self, query):
model = query.model_class
parts = ['DELETE FROM %s' % self.quote(model._meta.db_table)]
params = []
where, w_params = self.parse_query_node(query._where, None)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
return ' '.join(parts), params
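# field_sql renders a single column definition for CREATE TABLE: quoted column
# name, type template, NOT NULL / PRIMARY KEY modifiers, and REFERENCES or
# sequence defaults where applicable.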
def field_sql(self, field):
attrs = field.attributes
attrs['column_type'] = self.get_field(field.get_db_field())
template = field.template
if isinstance(field, ForeignKeyField):
to_pk = field.rel_model._meta.primary_key
if not isinstance(to_pk, PrimaryKeyField):
template = to_pk.template
attrs.update(to_pk.attributes)
parts = [self.quote(field.db_column), template]
if not field.null:
parts.append('NOT NULL')
if field.primary_key:
parts.append('PRIMARY KEY')
if isinstance(field, ForeignKeyField):
ref_mc = (
self.quote(field.rel_model._meta.db_table),
self.quote(field.rel_model._meta.primary_key.db_column),
)
parts.append('REFERENCES %s (%s)' % ref_mc)
parts.append('%(cascade)s%(extra)s')
elif field.sequence:
parts.append("DEFAULT NEXTVAL('%s')" % self.quote(field.sequence))
return ' '.join(p % attrs for p in parts)
def parse_create_table(self, model_class, safe=False):
parts = ['CREATE TABLE']
if safe:
parts.append('IF NOT EXISTS')
parts.append(self.quote(model_class._meta.db_table))
columns = ', '.join(self.field_sql(f) for f in model_class._meta.get_fields())
parts.append('(%s)' % columns)
return parts
def create_table(self, model_class, safe=False):
return ' '.join(self.parse_create_table(model_class, safe))
def drop_table(self, model_class, fail_silently=False, cascade=False):
parts = ['DROP TABLE']
if fail_silently:
parts.append('IF EXISTS')
parts.append(self.quote(model_class._meta.db_table))
if cascade:
parts.append('CASCADE')
return ' '.join(parts)
def parse_create_index(self, model_class, fields, unique):
tbl_name = model_class._meta.db_table
colnames = [f.db_column for f in fields]
parts = ['CREATE %s' % ('UNIQUE INDEX' if unique else 'INDEX')]
parts.append(self.quote('%s_%s' % (tbl_name, '_'.join(colnames))))
parts.append('ON %s' % self.quote(tbl_name))
parts.append('(%s)' % ', '.join(map(self.quote, colnames)))
return parts
def create_index(self, model_class, fields, unique):
return ' '.join(self.parse_create_index(model_class, fields, unique))
def create_sequence(self, sequence_name):
return 'CREATE SEQUENCE %s;' % self.quote(sequence_name)
def drop_sequence(self, sequence_name):
return 'DROP SEQUENCE %s;' % self.quote(sequence_name)
class QueryResultWrapper(object):
"""
Provides an iterator over the results of a raw Query, additionally doing
two things:
- converts rows from the database into model instances
- ensures that multiple iterations do not result in multiple queries
"""
def __init__(self, model, cursor, meta=None):
self.model = model
self.cursor = cursor
self.naive = not meta
if self.naive:
cols = []
non_cols = []
for i in range(len(self.cursor.description)):
col = self.cursor.description[i][0]
if col in model._meta.columns:
cols.append((i, model._meta.columns[col]))
else:
non_cols.append((i, col))
self._cols = cols
self._non_cols = non_cols
else:
self.column_meta, self.join_meta = meta
self.__ct = 0
self.__idx = 0
self._result_cache = []
self._populated = False
def simple_iter(self, row):
instance = self.model()
for i, f in self._cols:
setattr(instance, f.name, f.python_value(row[i]))
for i, f in self._non_cols:
setattr(instance, f, row[i])
return instance
def construct_instance(self, row):
# we have columns, models, and a graph of joins to reconstruct
collected_models = {}
cols = [c[0] for c in self.cursor.description]
for i, expr in enumerate(self.column_meta):
value = row[i]
if isinstance(expr, Field):
model = expr.model_class
else:
model = self.model
if model not in collected_models:
collected_models[model] = model()
instance = collected_models[model]
if isinstance(expr, Field):
setattr(instance, expr.name, expr.python_value(value))
elif isinstance(expr, Expr) and expr._alias:
setattr(instance, expr._alias, value)
else:
setattr(instance, cols[i], value)
return self.follow_joins(self.join_meta, collected_models, self.model)
def follow_joins(self, joins, collected_models, current):
inst = collected_models[current]
if current not in joins:
return inst
for joined_model, _, _ in joins[current]:
if joined_model in collected_models:
joined_inst = self.follow_joins(joins, collected_models, joined_model)
fk_field = current._meta.rel_for_model(joined_model)
if not fk_field:
continue
if joined_inst.get_id() is None and fk_field.name in inst._data:
rel_inst_id = inst._data[fk_field.name]
joined_inst.set_id(rel_inst_id)
setattr(inst, fk_field.name, joined_inst)
return inst
def __iter__(self):
self.__idx = 0
if not self._populated:
return self
else:
return iter(self._result_cache)
def iterate(self):
row = self.cursor.fetchone()
if not row:
self._populated = True
raise StopIteration
if self.naive:
return self.simple_iter(row)
else:
return self.construct_instance(row)
def iterator(self):
while 1:
yield self.iterate()
def next(self):
if self.__idx < self.__ct:
inst = self._result_cache[self.__idx]
self.__idx += 1
return inst
instance = self.iterate()
instance.prepared() # <-- model prepared hook
self._result_cache.append(instance)
self.__ct += 1
self.__idx += 1
return instance
def fill_cache(self, n=None):
n = n or float('Inf')
self.__idx = self.__ct
while not self._populated and (n > self.__ct):
try:
self.next()
except StopIteration:
break
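# returns_clone makes query-building methods non-destructive: each decorated
# method operates on a clone and returns it, so queries can be chained without
# mutating the original (the in-place version stays reachable via .call_local).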
def returns_clone(func):
def inner(self, *args, **kwargs):
clone = self.clone()
func(clone, *args, **kwargs)
return clone
inner.call_local = func
return inner
def not_allowed(fn):
def inner(self, *args, **kwargs):
raise NotImplementedError('%s is not allowed on %s instances' % (
fn, type(self).__name__,
))
return inner
Join = namedtuple('Join', ('model_class', 'join_type', 'on'))
class Query(object):
require_commit = True
def __init__(self, model_class):
self.model_class = model_class
self.database = model_class._meta.database
self._dirty = True
self._query_ctx = model_class
self._joins = {self.model_class: []} # adjacency graph
self._where = None
def clone(self):
query = type(self)(self.model_class)
if self._where is not None:
query._where = self._where.clone()
query._joins = self.clone_joins()
query._query_ctx = self._query_ctx
return query
def clone_joins(self):
return dict(
(mc, list(j)) for mc, j in self._joins.items()
)
@returns_clone
def where(self, *q_or_node):
if self._where is None:
self._where = reduce(operator.and_, q_or_node)
else:
for piece in q_or_node:
self._where &= piece
@returns_clone
def join(self, model_class, join_type=None, on=None):
if not self._query_ctx._meta.rel_exists(model_class):
raise ValueError('No foreign key between %s and %s' % (
self._query_ctx, model_class,
))
if on and isinstance(on, basestring):
on = self._query_ctx._meta.fields[on]
self._joins.setdefault(self._query_ctx, [])
self._joins[self._query_ctx].append(Join(model_class, join_type, on))
self._query_ctx = model_class
@returns_clone
def switch(self, model_class=None):
self._query_ctx = model_class or self.model_class
def ensure_join(self, lm, rm, on=None):
ctx = self._query_ctx
for join in self._joins.get(lm, []):
if join.model_class == rm:
return self
query = self.switch(lm).join(rm, on=on).switch(ctx)
return query
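# convert_dict_to_node translates Django-style keyword filters
# (field__subfield__op=value) into Expr nodes plus the relation descriptors
# that must be joined to satisfy them.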
def convert_dict_to_node(self, qdict):
accum = []
joins = []
for key, value in sorted(qdict.items()):
curr = self.model_class
if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP:
key, op = key.rsplit('__', 1)
op = DJANGO_MAP[op]
else:
op = OP_EQ
for piece in key.split('__'):
model_attr = getattr(curr, piece)
if isinstance(model_attr, (ForeignKeyField, ReverseRelationDescriptor)):
curr = model_attr.rel_model
joins.append(model_attr)
accum.append(Expr(model_attr, op, value))
return accum, joins
def filter(self, *args, **kwargs):
# normalize args and kwargs into a new expression
dq_node = Leaf()
if args:
dq_node &= reduce(operator.and_, [a.clone() for a in args])
if kwargs:
dq_node &= DQ(**kwargs)
# dq_node should now be an Expr, lhs = Leaf(), rhs = ...
q = deque([dq_node])
dq_joins = set()
while q:
curr = q.popleft()
if not isinstance(curr, Expr):
continue
for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)):
if isinstance(piece, DQ):
query, joins = self.convert_dict_to_node(piece.query)
dq_joins.update(joins)
setattr(curr, side, reduce(operator.and_, query))
else:
q.append(piece)
dq_node = dq_node.rhs
query = self.clone()
for field in dq_joins:
if isinstance(field, ForeignKeyField):
lm, rm = field.model_class, field.rel_model
field_obj = field
elif isinstance(field, ReverseRelationDescriptor):
lm, rm = field.field.rel_model, field.rel_model
field_obj = field.field
query = query.ensure_join(lm, rm, field_obj)
return query.where(dq_node)
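# Illustrative example (model and field names are assumptions):
#   Tweet.select().filter(user__username='charlie')  # implies a Tweet -> User join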
def sql(self, compiler):
raise NotImplementedError()
def execute(self):
raise NotImplementedError
class RawQuery(Query):
def __init__(self, model, query, *params):
self._sql = query
self._params = list(params)
self._qr = None
super(RawQuery, self).__init__(model)
def clone(self):
return RawQuery(self.model_class, self._sql, *self._params)
def sql(self, compiler):
return self._sql, self._params
join = not_allowed('joining')
where = not_allowed('where')
switch = not_allowed('switch')
def execute(self):
if self._qr is None:
self._qr = QueryResultWrapper(self.model_class, self.database.execute(self), None)
return self._qr
def __iter__(self):
return iter(self.execute())
class SelectQuery(Query):
require_commit = False
def __init__(self, model_class, *selection):
self._explicit_selection = len(selection) > 0
self._select = self._model_shorthand(selection or model_class._meta.get_fields())
self._group_by = None
self._having = None
self._order_by = None
self._limit = None
self._offset = None
self._distinct = False
self._for_update = False
self._naive = False
self._qr = None
super(SelectQuery, self).__init__(model_class)
def clone(self):
query = super(SelectQuery, self).clone()
query._explicit_selection = self._explicit_selection
query._select = list(self._select)
if self._group_by is not None:
query._group_by = list(self._group_by)
if self._having:
query._having = self._having.clone()
if self._order_by is not None:
query._order_by = list(self._order_by)
query._limit = self._limit
query._offset = self._offset
query._distinct = self._distinct
query._for_update = self._for_update
query._naive = self._naive
return query
def _model_shorthand(self, args):
accum = []
for arg in args:
if isinstance(arg, Leaf):
accum.append(arg)
elif issubclass(arg, Model):
accum.extend(arg._meta.get_fields())
return accum
@returns_clone
def group_by(self, *args):
self._group_by = self._model_shorthand(args)
@returns_clone
def having(self, *q_or_node):
if self._having is None:
self._having = reduce(operator.and_, q_or_node)
else:
for piece in q_or_node:
self._having &= piece
@returns_clone
def order_by(self, *args):
self._order_by = list(args)
@returns_clone
def limit(self, lim):
self._limit = lim
@returns_clone
def offset(self, off):
self._offset = off
@returns_clone
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
@returns_clone
def distinct(self, is_distinct=True):
self._distinct = is_distinct
@returns_clone
def for_update(self, for_update=True):
self._for_update = for_update
@returns_clone
def naive(self, naive=True):
self._naive = naive
def annotate(self, rel_model, annotation=None):
annotation = annotation or fn.Count(rel_model._meta.primary_key).alias('count')
query = self.clone()
query = query.ensure_join(query._query_ctx, rel_model)
if not query._group_by:
query._group_by = list(query._select)
query._select = tuple(query._select) + (annotation,)
return query
def _aggregate(self, aggregation=None):
aggregation = aggregation or fn.Count(self.model_class._meta.primary_key)
query = self.order_by()
query._select = (aggregation,)
return query
def aggregate(self, aggregation=None):
query = self._aggregate(aggregation)
compiler = self.database.get_compiler()
sql, params = query.sql(compiler)
curs = query.database.execute_sql(sql, params, require_commit=False)
return curs.fetchone()[0]
def count(self):
if self._distinct or self._group_by:
return self.wrapped_count()
clone = self.order_by()
clone._limit = clone._offset = None
clone._select = [fn.Count(clone.model_class._meta.primary_key)]
res = clone.database.execute(clone)
return (res.fetchone() or [0])[0]
def wrapped_count(self):
clone = self.order_by()
clone._limit = clone._offset = None
compiler = self.database.get_compiler()
sql, params = clone.sql(compiler)
query = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
res = clone.database.execute_sql(query, params, require_commit=False)
return res.fetchone()[0]
def exists(self):
clone = self.paginate(1, 1)
clone._select = [self.model_class._meta.primary_key]
res = self.database.execute(clone)
return bool(res.fetchone())
def get(self):
clone = self.paginate(1, 1)
try:
return clone.execute().next()
except StopIteration:
raise self.model_class.DoesNotExist('instance matching query does not exist:\nSQL: %s\nPARAMS: %s' % (
self.sql(self.database.get_compiler())
))
def sql(self, compiler):
return compiler.parse_select_query(self)
def verify_naive(self):
for expr in self._select:
if isinstance(expr, Field) and expr.model_class != self.model_class:
return False
return True
def execute(self):
if self._dirty or not self._qr:
if self._naive or not self._joins or self.verify_naive():
query_meta = None
else:
query_meta = [self._select, self._joins]
self._qr = QueryResultWrapper(self.model_class, self.database.execute(self), query_meta)
self._dirty = False
return self._qr
else:
return self._qr
def __iter__(self):
return iter(self.execute())
def __getitem__(self, value):
offset = limit = None
if isinstance(value, slice):
if value.start:
offset = value.start
if value.stop:
limit = value.stop - (value.start or 0)
else:
if value < 0:
raise ValueError('Negative indexes are not supported, try ordering in reverse')
offset = value
limit = 1
if self._limit != limit or self._offset != offset:
self._qr = None
self._limit = limit
self._offset = offset
res = list(self)
return limit == 1 and res[0] or res
class UpdateQuery(Query):
def __init__(self, model_class, update=None):
self._update = update
super(UpdateQuery, self).__init__(model_class)
def clone(self):
query = super(UpdateQuery, self).clone()
query._update = dict(self._update)
return query
join = not_allowed('joining')
def sql(self, compiler):
return compiler.parse_update_query(self)
def execute(self):
result = self.database.execute(self)
return self.database.rows_affected(result)
class InsertQuery(Query):
def __init__(self, model_class, insert=None):
mm = model_class._meta
query = dict((mm.fields[f], v) for f, v in mm.get_default_dict().items())
query.update(insert)
self._insert = query
super(InsertQuery, self).__init__(model_class)
def clone(self):
query = super(InsertQuery, self).clone()
query._insert = dict(self._insert)
return query
join = not_allowed('joining')
where = not_allowed('where clause')
def sql(self, compiler):
return compiler.parse_insert_query(self)
def execute(self):
result = self.database.execute(self)
return self.database.last_insert_id(result, self.model_class)
class DeleteQuery(Query):
join = not_allowed('joining')
def sql(self, compiler):
return compiler.parse_delete_query(self)
def execute(self):
result = self.database.execute(self)
return self.database.rows_affected(result)
class Database(object):
commit_select = False
compiler_class = QueryCompiler
empty_limit = False
field_overrides = {}
for_update = False
interpolation = '?'
op_overrides = {}
quote_char = '"'
reserved_tables = []
sequences = False
subquery_delete_same_table = True
def __init__(self, database, threadlocals=False, autocommit=True,
fields=None, ops=None, **connect_kwargs):
self.init(database, **connect_kwargs)
if threadlocals:
self.__local = threading.local()
else:
self.__local = type('DummyLocal', (object,), {})
self._conn_lock = threading.Lock()
self.autocommit = autocommit
self.field_overrides = dict_update(self.field_overrides, fields or {})
self.op_overrides = dict_update(self.op_overrides, ops or {})
def init(self, database, **connect_kwargs):
self.deferred = database is None
self.database = database
self.connect_kwargs = connect_kwargs
def connect(self):
with self._conn_lock:
if self.deferred:
raise Exception('Error, database not properly initialized before opening connection')
self.__local.conn = self._connect(self.database, **self.connect_kwargs)
self.__local.closed = False
def close(self):
with self._conn_lock:
if self.deferred:
raise Exception('Error, database not properly initialized before closing connection')
self._close(self.__local.conn)
self.__local.closed = True
def get_conn(self):
if not hasattr(self.__local, 'closed') or self.__local.closed:
self.connect()
return self.__local.conn
def is_closed(self):
return getattr(self.__local, 'closed', True)
def get_cursor(self):
return self.get_conn().cursor()
def _close(self, conn):
conn.close()
def _connect(self, database, **kwargs):
raise NotImplementedError
@classmethod
def register_fields(cls, fields):
cls.field_overrides = dict_update(cls.field_overrides, fields)
@classmethod
def register_ops(cls, ops):
cls.op_overrides = dict_update(cls.op_overrides, ops)
def last_insert_id(self, cursor, model):
if model._meta.auto_increment:
return cursor.lastrowid
def rows_affected(self, cursor):
return cursor.rowcount
def get_compiler(self):
return self.compiler_class(
self.quote_char, self.interpolation, self.field_overrides,
self.op_overrides)
def execute(self, query):
sql, params = query.sql(self.get_compiler())
if isinstance(query, (SelectQuery, RawQuery)):
commit = self.commit_select
else:
commit = query.require_commit
return self.execute_sql(sql, params, commit)
def execute_sql(self, sql, params=None, require_commit=True):
cursor = self.get_cursor()
res = cursor.execute(sql, params or ())
if require_commit and self.get_autocommit():
self.commit()
logger.debug((sql, params))
return cursor
def begin(self):
pass
def commit(self):
self.get_conn().commit()
def rollback(self):
self.get_conn().rollback()
def set_autocommit(self, autocommit):
self.__local.autocommit = autocommit
def get_autocommit(self):
if not hasattr(self.__local, 'autocommit'):
self.set_autocommit(self.autocommit)
return self.__local.autocommit
def get_tables(self):
raise NotImplementedError
def get_indexes_for_table(self, table):
raise NotImplementedError
def sequence_exists(self, seq):
raise NotImplementedError
def create_table(self, model_class):
qc = self.get_compiler()
return self.execute_sql(qc.create_table(model_class))
def create_index(self, model_class, fields, unique=False):
qc = self.get_compiler()
if not isinstance(fields, (list, tuple)):
raise ValueError('fields passed to "create_index" must be a list or tuple: "%s"' % fields)
field_objs = [model_class._meta.fields[f] if isinstance(f, basestring) else f for f in fields]
return self.execute_sql(qc.create_index(model_class, field_objs, unique))
def create_foreign_key(self, model_class, field):
if not field.primary_key:
return self.create_index(model_class, [field], field.unique)
def create_sequence(self, seq):
if self.sequences:
qc = self.get_compiler()
return self.execute_sql(qc.create_sequence(seq))
def drop_table(self, model_class, fail_silently=False):
qc = self.get_compiler()
return self.execute_sql(qc.drop_table(model_class, fail_silently))
def drop_sequence(self, seq):
if self.sequences:
qc = self.get_compiler()
return self.execute_sql(qc.drop_sequence(seq))
def transaction(self):
return transaction(self)
def commit_on_success(self, func):
def inner(*args, **kwargs):
orig = self.get_autocommit()
self.set_autocommit(False)
self.begin()
try:
res = func(*args, **kwargs)
self.commit()
except:
self.rollback()
raise
else:
return res
finally:
self.set_autocommit(orig)
return inner
class SqliteDatabase(Database):
op_overrides = {
OP_LIKE: 'GLOB',
OP_ILIKE: 'LIKE',
}
def _connect(self, database, **kwargs):
if not sqlite3:
raise ImproperlyConfigured('sqlite3 must be installed on the system')
return sqlite3.connect(database, **kwargs)
def get_indexes_for_table(self, table):
res = self.execute_sql('PRAGMA index_list(%s);' % self.quote(table))
rows = sorted([(r[1], r[2] == 1) for r in res.fetchall()])
return rows
def get_tables(self):
res = self.execute_sql('select name from sqlite_master where type="table" order by name')
return [r[0] for r in res.fetchall()]
class PostgresqlDatabase(Database):
commit_select = True
empty_limit = True
field_overrides = {
'bigint': 'BIGINT',
'bool': 'BOOLEAN',
'datetime': 'TIMESTAMP',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'primary_key': 'SERIAL',
}
for_update = True
interpolation = '%s'
reserved_tables = ['user']
sequences = True
def _connect(self, database, **kwargs):
if not psycopg2:
raise ImproperlyConfigured('psycopg2 must be installed on the system')
return psycopg2.connect(database=database, **kwargs)
def last_insert_id(self, cursor, model):
seq = model._meta.primary_key.sequence
if seq:
cursor.execute("SELECT CURRVAL('\"%s\"')" % (seq))
return cursor.fetchone()[0]
elif model._meta.auto_increment:
cursor.execute("SELECT CURRVAL('\"%s_%s_seq\"')" % (
model._meta.db_table, model._meta.primary_key.db_column))
return cursor.fetchone()[0]
def get_indexes_for_table(self, table):
res = self.execute_sql("""
SELECT c2.relname, i.indisprimary, i.indisunique
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i
WHERE c.relname = %s AND c.oid = i.indrelid AND i.indexrelid = c2.oid
ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname""", (table,))
return sorted([(r[0], r[1]) for r in res.fetchall()])
def get_tables(self):
res = self.execute_sql("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY c.relname""")
return [row[0] for row in res.fetchall()]
def sequence_exists(self, sequence):
res = self.execute_sql("""
SELECT COUNT(*)
FROM pg_class, pg_namespace
WHERE relkind='S'
AND pg_class.relnamespace = pg_namespace.oid
AND relname=%s""", (sequence,))
return bool(res.fetchone()[0])
def set_search_path(self, *search_path):
path_params = ','.join(['%s'] * len(search_path))
self.execute_sql('SET search_path TO %s' % path_params, search_path)
class MySQLDatabase(Database):
commit_select = True
field_overrides = {
'bigint': 'BIGINT',
'boolean': 'BOOL',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'float': 'FLOAT',
'primary_key': 'INTEGER AUTO_INCREMENT',
'text': 'LONGTEXT',
}
for_update = True
interpolation = '%s'
op_overrides = {OP_LIKE: 'LIKE BINARY', OP_ILIKE: 'LIKE'}
quote_char = '`'
subquery_delete_same_table = False
def _connect(self, database, **kwargs):
if not mysql:
raise ImproperlyConfigured('MySQLdb must be installed on the system')
conn_kwargs = {
'charset': 'utf8',
'use_unicode': True,
}
conn_kwargs.update(kwargs)
return mysql.connect(db=database, **conn_kwargs)
def create_foreign_key(self, model_class, field):
compiler = self.get_compiler()
framing = """
ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s
FOREIGN KEY (%(field)s) REFERENCES %(to)s(%(to_field)s)%(cascade)s;
"""
db_table = model_class._meta.db_table
constraint = 'fk_%s_%s_%s' % (
db_table,
field.rel_model._meta.db_table,
field.db_column,
)
query = framing % {
'table': compiler.quote(db_table),
'constraint': compiler.quote(constraint),
'field': compiler.quote(field.db_column),
'to': compiler.quote(field.rel_model._meta.db_table),
'to_field': compiler.quote(field.rel_model._meta.primary_key.db_column),
'cascade': ' ON DELETE CASCADE' if field.cascade else '',
}
self.execute_sql(query)
return super(MySQLDatabase, self).create_foreign_key(model_class, field)
def get_indexes_for_table(self, table):
res = self.execute_sql('SHOW INDEXES IN `%s`;' % table)
rows = sorted([(r[2], r[1] == 0) for r in res.fetchall()])
return rows
def get_tables(self):
res = self.execute_sql('SHOW TABLES;')
return [r[0] for r in res.fetchall()]
class transaction(object):
def __init__(self, db):
self.db = db
def __enter__(self):
self._orig = self.db.get_autocommit()
self.db.set_autocommit(False)
self.db.begin()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.db.rollback()
else:
self.db.commit()
self.db.set_autocommit(self._orig)
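# Usage sketch:
#   with db.transaction():
#       ...  # statements commit together; an exception triggers rollback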
class DoesNotExist(Exception):
pass
# doing an IN on empty set
class EmptyResultException(Exception):
pass
default_database = SqliteDatabase('peewee.db')
class ModelOptions(object):
def __init__(self, cls, database=None, db_table=None, indexes=None,
order_by=None, primary_key=None):
self.model_class = cls
self.name = cls.__name__.lower()
self.fields = {}
self.columns = {}
self.defaults = {}
self.database = database or default_database
self.db_table = db_table
self.indexes = indexes or []
self.order_by = order_by
self.primary_key = primary_key
self.auto_increment = None
self.rel = {}
self.reverse_rel = {}
def prepared(self):
for field in self.fields.values():
if field.default is not None:
self.defaults[field] = field.default
if self.order_by:
norm_order_by = []
for clause in self.order_by:
field = self.fields[clause.lstrip('-')]
if clause.startswith('-'):
norm_order_by.append(field.desc())
else:
norm_order_by.append(field.asc())
self.order_by = norm_order_by
def get_default_dict(self):
dd = {}
for field, default in self.defaults.items():
if callable(default):
dd[field.name] = default()
else:
dd[field.name] = default
return dd
def get_sorted_fields(self):
return sorted(self.fields.items(), key=lambda (k,v): (v is self.primary_key and 1 or 2, v._order))
def get_field_names(self):
return [f[0] for f in self.get_sorted_fields()]
def get_fields(self):
return [f[1] for f in self.get_sorted_fields()]
def rel_for_model(self, model, field_obj=None):
for field in self.get_fields():
if isinstance(field, ForeignKeyField) and field.rel_model == model:
if field_obj is None or field_obj.name == field.name:
return field
def reverse_rel_for_model(self, model):
return model._meta.rel_for_model(self.model_class)
def rel_exists(self, model):
return self.rel_for_model(model) or self.reverse_rel_for_model(model)
class BaseModel(type):
inheritable_options = ['database', 'indexes', 'order_by', 'primary_key']
def __new__(cls, name, bases, attrs):
if not bases:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
meta_options.update((k, v) for k, v in meta.__dict__.items() if not k.startswith('_'))
# inherit any field descriptors by deep copying the underlying field obj
# into the attrs of the new model, additionally see if the bases define
# inheritable model options and swipe them
for b in bases:
if not hasattr(b, '_meta'):
continue
base_meta = getattr(b, '_meta')
for (k, v) in base_meta.__dict__.items():
if k in cls.inheritable_options and k not in meta_options:
meta_options[k] = v
for (k, v) in b.__dict__.items():
if isinstance(v, FieldDescriptor) and k not in attrs:
if not v.field.primary_key:
attrs[k] = deepcopy(v.field)
# initialize the new class and set the magic attributes
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
cls._data = None
primary_key = None
# replace the fields with field descriptors, calling the add_to_class hook
for name, attr in cls.__dict__.items():
cls._meta.indexes = list(cls._meta.indexes)
if isinstance(attr, Field):
attr.add_to_class(cls, name)
if attr.primary_key:
primary_key = attr
if not primary_key:
primary_key = PrimaryKeyField(primary_key=True)
primary_key.add_to_class(cls, 'id')
cls._meta.primary_key = primary_key
cls._meta.auto_increment = isinstance(primary_key, PrimaryKeyField) or primary_key.sequence
if not cls._meta.db_table:
cls._meta.db_table = re.sub('[^\w]+', '_', cls.__name__.lower())
# create a repr and error class before finalizing
if hasattr(cls, '__unicode__'):
setattr(cls, '__repr__', lambda self: '<%s: %r>' % (
cls.__name__, self.__unicode__()))
exception_class = type('%sDoesNotExist' % cls.__name__, (DoesNotExist,), {})
cls.DoesNotExist = exception_class
cls._meta.prepared()
return cls
class Model(object):
__metaclass__ = BaseModel
def __init__(self, *args, **kwargs):
self._data = self._meta.get_default_dict()
self._obj_cache = {} # cache of related objects
for k, v in kwargs.items():
setattr(self, k, v)
@classmethod
def select(cls, *selection):
query = SelectQuery(cls, *selection)
if cls._meta.order_by:
query = query.order_by(*cls._meta.order_by)
return query
@classmethod
def update(cls, **update):
fdict = dict((cls._meta.fields[f], v) for f, v in update.items())
return UpdateQuery(cls, fdict)
@classmethod
def insert(cls, **insert):
fdict = dict((cls._meta.fields[f], v) for f, v in insert.items())
return InsertQuery(cls, fdict)
@classmethod
def delete(cls):
return DeleteQuery(cls)
@classmethod
def raw(cls, sql, *params):
return RawQuery(cls, sql, *params)
@classmethod
def create(cls, **query):
inst = cls(**query)
inst.save(force_insert=True)
return inst
@classmethod
def get(cls, *query, **kwargs):
sq = cls.select().naive()
if query:
sq = sq.where(*query)
if kwargs:
sq = sq.filter(**kwargs)
return sq.get()
@classmethod
def get_or_create(cls, **kwargs):
sq = cls.select().filter(**kwargs)
try:
return sq.get()
except cls.DoesNotExist:
return cls.create(**kwargs)
@classmethod
def filter(cls, *dq, **query):
return cls.select().filter(*dq, **query)
@classmethod
def table_exists(cls):
return cls._meta.db_table in cls._meta.database.get_tables()
@classmethod
def create_table(cls, fail_silently=False):
if fail_silently and cls.table_exists():
return
db = cls._meta.database
pk = cls._meta.primary_key
if db.sequences and pk.sequence and not db.sequence_exists(pk.sequence):
db.create_sequence(pk.sequence)
db.create_table(cls)
for field_name, field_obj in cls._meta.fields.items():
if isinstance(field_obj, ForeignKeyField):
db.create_foreign_key(cls, field_obj)
elif field_obj.index or field_obj.unique:
db.create_index(cls, [field_obj], field_obj.unique)
if cls._meta.indexes:
for fields, unique in cls._meta.indexes:
db.create_index(cls, fields, unique)
@classmethod
def drop_table(cls, fail_silently=False):
cls._meta.database.drop_table(cls, fail_silently)
def get_id(self):
return getattr(self, self._meta.primary_key.name)
def set_id(self, id):
setattr(self, self._meta.primary_key.name, id)
def prepared(self):
pass
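# save() issues an UPDATE when a primary key value is already set (unless
# force_insert), otherwise an INSERT; for auto-increment keys the newly
# generated id is written back onto the instance.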
def save(self, force_insert=False):
field_dict = dict(self._data)
pk = self._meta.primary_key
if self.get_id() is not None and not force_insert:
field_dict.pop(pk.name)
update = self.update(
**field_dict
).where(pk == self.get_id())
update.execute()
else:
if self._meta.auto_increment:
field_dict.pop(pk.name, None)
insert = self.insert(**field_dict)
new_pk = insert.execute()
if self._meta.auto_increment:
self.set_id(new_pk)
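# dependencies() walks reverse foreign keys outward from this row, yielding
# (expression, fk_field) pairs for dependent rows; delete_instance(recursive=True)
# uses them to null out or delete children before deleting the row itself.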
def dependencies(self, search_nullable=False):
stack = [(type(self), self.select().where(self._meta.primary_key == self.get_id()))]
seen = set()
while stack:
klass, query = stack.pop()
if klass in seen:
continue
seen.add(klass)
for rel_name, fk in klass._meta.reverse_rel.items():
rel_model = fk.model_class
expr = fk << query
if not fk.null or search_nullable:
stack.append((rel_model, rel_model.select().where(expr)))
yield (expr, fk)
def delete_instance(self, recursive=False, delete_nullable=False):
if recursive:
for query, fk in reversed(list(self.dependencies(delete_nullable))):
if fk.null and not delete_nullable:
fk.model_class.update(**{fk.name: None}).where(query).execute()
else:
fk.model_class.delete().where(query).execute()
return self.delete().where(self._meta.primary_key == self.get_id()).execute()
def __eq__(self, other):
return other.__class__ == self.__class__ and \
self.get_id() is not None and \
other.get_id() == self.get_id()
def __ne__(self, other):
return not self == other
def create_model_tables(models, **create_table_kwargs):
"""Create tables for all given models (in the right order)."""
for m in sort_models_topologically(models):
m.create_table(**create_table_kwargs)
def drop_model_tables(models, **drop_table_kwargs):
"""Drop tables for all given models (in the right order)."""
for m in reversed(sort_models_topologically(models)):
m.drop_table(**drop_table_kwargs)
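# Usage sketch (model names are illustrative):
#   create_model_tables([User, Tweet])   # parents are created before children
#   drop_model_tables([User, Tweet])     # dropped in reverse dependency order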
def sort_models_topologically(models):
"""Sort models topologically so that parents will precede children."""
models = set(models)
seen = set()
ordering = []
def dfs(model):
if model in models and model not in seen:
seen.add(model)
for foreign_key in model._meta.reverse_rel.values():
dfs(foreign_key.model_class)
ordering.append(model) # parent will follow descendants
# order models by name and table initially to guarantee a total ordering
names = lambda m: (m._meta.name, m._meta.db_table)
for m in sorted(models, key=names, reverse=True):
dfs(m)
return list(reversed(ordering)) # want parents first in output ordering
def raw_sql(query):
db = query.model_class._meta.database
return query.sql(db.get_compiler())
|
mit
| 656,235,991,233,999,200
| 30.508409
| 114
| 0.543091
| false
| 3.930966
| false
| false
| false
|
QuinnSong/JPG-Tools
|
src/background.py
|
1
|
1161
|
# Modified based on Phatch
#---PIL modules import
from shadow import fill_background_color, generate_layer, \
remove_alpha, has_transparency, get_alpha, paste
from PIL import Image
#from reflection import HTMLColorToRGBA
FILL_CHOICES = ('Color', 'Image')
def background(image, fill, mark, color,
horizontal_offset=None, vertical_offset=None,
horizontal_justification=None, vertical_justification=None,
orientation=None, method=None, opacity=100):
"""color is RGB"""
if not has_transparency(image):
return image
if image.mode == 'P':
image = image.convert('RGBA')
if fill == FILL_CHOICES[0]:
opacity = (255 * opacity) / 100
r,g,b = color
return fill_background_color(image, (r,g,b, opacity))
elif fill == FILL_CHOICES[1]:
layer = generate_layer(image.size, mark, method,
horizontal_offset, vertical_offset,
horizontal_justification,
vertical_justification,
orientation, opacity)
paste(layer, image, mask=image)
return layer
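# Usage sketch (file name is illustrative; requires an image with transparency):
#   img = Image.open('logo.png')
#   flattened = background(img, FILL_CHOICES[0], None, (255, 255, 255), opacity=100)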
|
gpl-3.0
| -3,725,835,973,732,491,000
| 36.483871
| 67
| 0.605512
| false
| 4.206522
| false
| false
| false
|
javahust/dotamax
|
dataIngest/crawler.py
|
1
|
4059
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import dota2api
import datetime
import time
import thread
api = dota2api.Initialise()
seed_user_id = 161877399
#seed_user_id = 98887913
hero_list = api.get_heroes()['heroes']
item_list = api.get_game_items()['items']
# heroes
# {
# count - Number of results
# status - HTTP status code
# [heroes]
# {
# id - Unique hero ID
# name - Hero's name
# localized_name - Localized version of hero's name
# url_full_portrait - URL to full-size hero portrait (256x144)
# url_large_portrait - URL to large hero portrait (205x115)
# url_small_portrait - URL to small hero portrait (59x33)
# url_vertical_portrait - URL to vertical hero portrait (235x272)
# }
# }
#items:
# {
# count - Number of results
# status - HTTP status response
# [items]
# {
# id - Unique item ID
# name - Item's name
# cost - Item's gold cost in game, 0 if recipe
# localized_name - Item's localized name
# recipe - True if item is a recipe item, false otherwise
# secret_shop - True if item is bought at the secret shop, false otherwise
# side_shop - True if item is bought at the side shop, false otherwise
# }
# }
# getmatchhistory Parameters:
# account_id – (int, optional)
# hero_id – (int, optional)
# game_mode – (int, optional) see ref/modes.json
# skill – (int, optional) see ref/skill.json
# min_players – (int, optional) only return matches with minimum amount of players
# league_id – (int, optional) for ids use get_league_listing()
# start_at_match_id – (int, optional) start at matches equal to or older than this match id
# matches_requested – (int, optional) defaults to 100
# tournament_games_only – (str, optional) limit results to tournament matches only
# response
# {
# num_results - Number of matches within a single response
# total_results - Total number of matches for this query
# results_remaining - Number of matches remaining to be retrieved with subsequent API calls
# [matches] - List of matches for this response
# {
# match_id - Unique match ID
# match_seq_num - Number indicating position in which this match was recorded
# start_time - Unix timestamp of beginning of match
# lobby_type - See lobby_type table
# [player] - List of players in the match
# {
# account_id - Unique account ID
# player_slot - Player's position within the team
# hero_id - Unique hero ID
# }
# }
# }
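# Example call implied by the notes above (ids and values are illustrative):
#   batch = api.get_match_history(account_id=seed_user_id, hero_id=1)
#   for m in batch['matches']:
#       print m['match_id'], timestamp_to_date(m['start_time'])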
def traverse_user_match(user_id):
match_set = set()
for hero in hero_list:
id = hero['id']
count = len(match_set)
batch_matches = api.get_match_history(account_id=user_id, hero_id=id)
while True:
# do process
matches = batch_matches['matches']
for match in matches:
match_set.add(match['match_id'])
if batch_matches['num_results'] < 100:
break
batch_matches = api.get_match_history(account_id=user_id, start_at_match_id=matches[-1]['match_id'], hero_id=id)
print "play {0} for {1} matches".format(hero['name'], (len(match_set) - count))
print len(match_set)
print match_set
def date_to_timestamp(date_str):
return int(time.mktime(datetime.datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S").timetuple()))
def timestamp_to_date(timestamp):
return datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
def main():
a = datetime.datetime.now()
traverse_user_match(seed_user_id)
b = datetime.datetime.now()
print(b - a)
if __name__ == "__main__":
main()
|
apache-2.0
| 1,302,979,220,849,933,000
| 35.080357
| 124
| 0.579065
| false
| 3.520035
| false
| false
| false
|
priyom/priyomdb
|
PriyomHTTP/Server/Resources/API/InstanciateSchedules.py
|
1
|
2595
|
"""
File name: InstanciateSchedules.py
This file is part of: priyomdb
LICENSE
The contents of this file are subject to the Mozilla Public License
Version 1.1 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
License for the specific language governing rights and limitations under
the License.
Alternatively, the contents of this file may be used under the terms of
the GNU General Public license (the "GPL License"), in which case the
provisions of GPL License are applicable instead of those above.
FEEDBACK & QUESTIONS
For feedback and questions about priyomdb please e-mail one of the
authors:
Jonas Wielicki <j.wielicki@sotecware.net>
"""
from WebStack.Generic import ContentType
import time
from datetime import datetime, timedelta
from libPriyom import *
from libPriyom.Formatting import priyomdate
from PriyomHTTP.Server.limits import queryLimits
from PriyomHTTP.Server.Resources.API.API import API, CallSyntax, Argument
class InstanciateSchedulesAPI(API):
title = u"instanciateSchedules"
shortDescription = u"instanciate schedules"
docArgs = [
Argument(u"stationId", u"station ID", u"Restrict the instanciation to a single station", metavar="stationid", optional=True),
]
docCallSyntax = CallSyntax(docArgs, u"?{0}")
docRequiredPrivilegues = u"instanciate"
def __init__(self, model):
super(InstanciateSchedulesAPI, self).__init__(model)
self.allowedMethods = frozenset(("POST", "GET", "HEAD"))
def handle(self, trans):
stationId = self.getQueryIntDefault("stationId", None, "must be integer")
trans.set_content_type(ContentType("text/plain", self.encoding))
if self.head:
return
if trans.get_request_method() == "GET":
print >>self.out, u"failed: Call this resource with POST to perform instanciation.".encode(self.encoding)
return
generatedUntil = 0
if stationId is None:
generatedUntil = self.priyomInterface.scheduleMaintainer.updateSchedules(None)
else:
generatedUntil = self.priyomInterface.scheduleMaintainer.updateSchedule(self.store.get(Station, stationId), None)
print >>self.out, u"success: valid until {0}".format(datetime.fromtimestamp(generatedUntil).strftime(priyomdate)).encode(self.encoding)
|
gpl-3.0
| -271,381,137,207,109,060
| 37.161765
| 143
| 0.721773
| false
| 3.861607
| false
| false
| false
|
rzarzynski/tempest
|
tempest/services/image/v2/json/image_client.py
|
1
|
7421
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
import jsonschema
from tempest_lib import exceptions as lib_exc
from tempest.common import glance_http
from tempest.common import service_client
class ImageClientV2JSON(service_client.ServiceClient):
def __init__(self, auth_provider, catalog_type, region, endpoint_type=None,
build_interval=None, build_timeout=None,
disable_ssl_certificate_validation=None, ca_certs=None,
**kwargs):
super(ImageClientV2JSON, self).__init__(
auth_provider,
catalog_type,
region,
endpoint_type=endpoint_type,
build_interval=build_interval,
build_timeout=build_timeout,
disable_ssl_certificate_validation=(
disable_ssl_certificate_validation),
ca_certs=ca_certs,
**kwargs)
self._http = None
self.dscv = disable_ssl_certificate_validation
self.ca_certs = ca_certs
def _get_http(self):
return glance_http.HTTPClient(auth_provider=self.auth_provider,
filters=self.filters,
insecure=self.dscv,
ca_certs=self.ca_certs)
def _validate_schema(self, body, type='image'):
if type in ['image', 'images']:
schema = self.get_schema(type)
else:
raise ValueError("%s is not a valid schema type" % type)
jsonschema.validate(body, schema)
@property
def http(self):
if self._http is None:
self._http = self._get_http()
return self._http
def update_image(self, image_id, patch):
data = json.dumps(patch)
self._validate_schema(data)
headers = {"Content-Type": "application/openstack-images-v2.0"
"-json-patch"}
resp, body = self.patch('v2/images/%s' % image_id, data, headers)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def create_image(self, name, container_format, disk_format, **kwargs):
params = {
"name": name,
"container_format": container_format,
"disk_format": disk_format,
}
for option in kwargs:
value = kwargs.get(option)
if isinstance(value, dict) or isinstance(value, tuple):
params.update(value)
else:
params[option] = value
data = json.dumps(params)
self._validate_schema(data)
resp, body = self.post('v2/images', data)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_image(self, image_id):
url = 'v2/images/%s' % image_id
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def image_list(self, params=None):
url = 'v2/images'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
self._validate_schema(body, type='images')
return service_client.ResponseBodyList(resp, body['images'])
def get_image(self, image_id):
url = 'v2/images/%s' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.get_image(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'image'
def store_image(self, image_id, data):
url = 'v2/images/%s/file' % image_id
headers = {'Content-Type': 'application/octet-stream'}
resp, body = self.http.raw_request('PUT', url, headers=headers,
body=data)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def get_image_file(self, image_id):
url = 'v2/images/%s/file' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBodyData(resp, body)
def add_image_tag(self, image_id, tag):
url = 'v2/images/%s/tags/%s' % (image_id, tag)
resp, body = self.put(url, body=None)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def delete_image_tag(self, image_id, tag):
url = 'v2/images/%s/tags/%s' % (image_id, tag)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def get_image_membership(self, image_id):
url = 'v2/images/%s/members' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def add_member(self, image_id, member_id):
url = 'v2/images/%s/members' % image_id
data = json.dumps({'member': member_id})
resp, body = self.post(url, data)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_member_status(self, image_id, member_id, status):
"""Valid status are: ``pending``, ``accepted``, ``rejected``."""
url = 'v2/images/%s/members/%s' % (image_id, member_id)
data = json.dumps({'status': status})
resp, body = self.put(url, data)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_member(self, image_id, member_id):
url = 'v2/images/%s/members/%s' % (image_id, member_id)
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, json.loads(body))
def remove_member(self, image_id, member_id):
url = 'v2/images/%s/members/%s' % (image_id, member_id)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def get_schema(self, schema):
url = 'v2/schemas/%s' % schema
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
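# Example usage (illustrative sketch; assumes an already-configured tempest
# ``auth_provider`` -- the image name and file path below are placeholders):
#
#     client = ImageClientV2JSON(auth_provider, 'image', 'RegionOne')
#     image = client.create_image('cirros', 'bare', 'qcow2', visibility='private')
#     client.store_image(image['id'], open('cirros.img', 'rb').read())
#     print(client.image_list())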
|
apache-2.0
| 8,774,955,404,667,336,000
| 35.55665
| 79
| 0.599245
| false
| 3.788157
| false
| false
| false
|
google/offline-content-packager
|
third_party/nkata/scripts/utils/ISOconverter.py
|
1
|
2430
|
# Copyright 2015 The Offline Content Packager Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ISO converter script.
"""
import logging
from os import link
from os import makedirs
from os import system
from os import unlink
from os.path import dirname
from os.path import isdir
from os.path import isfile
from os.path import join
from sys import platform
from tempfile import mkdtemp
import click
def to_iso(source, destination, filelist=None):
"""ISO converter utility.
Convert contents to ISO format checking the systems platform where the tool
is being run.
Args:
source: path to directory with content to be converted
destination: path to destination where the ISO file is written
    filelist: optional list of file paths under source; if given, only these
      files are hard-linked into a temporary directory and converted
"""
# overwrite existing ISO file
if isfile(destination):
unlink(destination)
if filelist:
# create tmp dir
tmpdir = mkdtemp()
for item in filelist:
rel = item[len(source)+1:]
dst = join(tmpdir, rel)
if not isdir(dirname(dst)):
makedirs(dirname(dst))
if not isdir(item):
link(item, dst)
source = tmpdir
if platform.startswith("darwin"):
system("hdiutil makehybrid -iso -joliet -o %s %s"%(destination, source))
click.echo("Finished!")
elif platform.startswith("linux"):
system("mkisofs -r -J -o %s %s"%(destination, source))
click.echo("Finished!")
else:
click.echo(platform + (" not supported for converting to ISO files."
"Try to download ISO maker tool from "
"'http://www.magiciso.com/tutorials/"
"miso-iso-creator.htm'"))
logging.debug(platform + (" not supported for converting to ISO files."
"Try to download ISO maker tool from "
"'http://www.magiciso.com/tutorials/"
"miso-iso-creator.htm'"))
|
apache-2.0
| 9,064,200,964,084,471,000
| 30.973684
| 77
| 0.665432
| false
| 4.255692
| false
| false
| false
|
jaantoots/bridgeview
|
render/textures.py
|
1
|
4059
|
"""Provide methods for texturing the scene for rendering."""
import json
import numpy as np
import bpy # pylint: disable=import-error
from . import helpers
class Textures():
"""Identify parts by name, organise into texturing groups and texture.
Initialise with list of objects to be textured.
Run: read groups and textures from JSON file & call `texture` to
assign (random) textures to objects
Test or setup: group parts to always have the same texture, add
available textures to groups (or ungrouped parts) & write groups
and textures to JSON file
"""
def __init__(self, objects: list):
"""Create Textures object for Blender objects list."""
self.objects = objects[:]
self.textures = helpers.Dict()
self.groups = helpers.Dict()
def read(self, texture_file: str):
"""Read texturing from file."""
with open(texture_file) as file:
data = json.load(file)
self.textures = helpers.Dict(data['textures'])
self.groups = helpers.Dict(data['groups'])
def write(self, texture_file: str):
"""Write texturing to file."""
with open(texture_file, 'w') as file:
data = {'textures': self.textures, 'groups': self.groups}
json.dump(data, file)
def smart_project_all(self):
"""Initialize objects for texturing using UV smart project (for testing only).
Usually need to prepare the model by choosing the best
projection for each part manually. Cube projection seems to
work well most of the time.
"""
for obj in self.objects:
bpy.data.scenes[0].objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.uv.smart_project()
bpy.ops.object.mode_set(mode='OBJECT')
bpy.data.scenes[0].objects.active = None
def cube_project_all(self):
"""Initialize objects for texturing using cube project.
Usually need to prepare the model by choosing the best
projection for each part manually. Cube projection seems to
work well most of the time.
"""
for obj in self.objects:
print(obj.name)
bpy.data.scenes[0].objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.uv.cube_project()
bpy.ops.object.mode_set(mode='OBJECT')
bpy.data.scenes[0].objects.active = None
def add_textures(self, group: str, textures: list):
"""Add available textures to group (or part if no group).
It is possible to add multiple textures per group to have one
chosen randomly when textures are applied to objects.
"""
self.textures[group] += textures
def add_parts_to_group(self, group: str, parts: list):
"""Assign parts to belong in a group that gets textured the same."""
self.groups[group] += parts
def texture(self):
"""Texture all objects (assumes all parts have been UV projected)."""
for group, textures in self.textures.items():
texture = np.random.choice(textures)
if group in self.groups:
for part in self.groups[group]:
self._texture_parts(part, texture)
else:
self._texture_parts(group, texture)
def _texture_parts(self, part: str, texture: str):
"""Texture all instances of a part, or all objects if part is ''."""
instances = helpers.all_instances(part, self.objects)
for obj in instances:
texture_object(obj, texture)
def texture_object(obj, texture: str):
"""Texture an object with texture.
Find a material with the name `texture` and make this the active
material of the object
"""
material = bpy.data.materials[texture]
# Assign the material to object
for _ in range(len(obj.material_slots)):
bpy.ops.object.material_slot_remove({'object': obj})
obj.data.materials.clear()
obj.active_material = material
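# Example usage (illustrative sketch following the setup/run workflow described
# in the class docstring; object, group and material names are placeholders):
#
#     objs = [obj for obj in bpy.data.objects if obj.type == 'MESH']
#     tex = Textures(objs)
#     # Setup: group parts, register candidate materials, save to JSON.
#     tex.add_parts_to_group('deck', ['deck_left', 'deck_right'])
#     tex.add_textures('deck', ['wood_01', 'wood_02'])
#     tex.write('/tmp/textures.json')
#     # Run: reload the texturing description, UV-project and texture.
#     tex.read('/tmp/textures.json')
#     tex.cube_project_all()
#     tex.texture()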
|
gpl-3.0
| 7,488,986,955,036,146,000
| 34.605263
| 86
| 0.626016
| false
| 4.193182
| false
| false
| false
|
woodymit/millstone
|
genome_designer/genome_finish/insertion_placement_read_trkg.py
|
1
|
22010
|
from collections import defaultdict
import os
import pickle
import re
import subprocess
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from django.conf import settings
import pysam
from genome_finish.contig_display_utils import Junction
from genome_finish.jbrowse_genome_finish import add_contig_reads_bam_track
from genome_finish.jbrowse_genome_finish import maybe_create_reads_to_contig_bam
from main.models import Dataset
from pipeline.read_alignment_util import ensure_bwa_index
from pipeline.read_alignment_util import has_bwa_index
from utils.bam_utils import index_bam
from utils.bam_utils import sort_bam_by_coordinate
from utils.import_util import add_dataset_to_entity
ENDPOINT_MODE_DIFFERENCE_FACTOR_CUTOFF = 0.5
REVERSED_COMPLEMENTARITY_FRACTION_CUTOFF = 0.75
ENDPOINT_FRACTION = 0.8
def get_insertion_placement_positions(contig, strategy='all_reads'):
def _get_contig_reads_using_strategy(strategy):
if strategy == 'all_reads':
return extract_contig_reads(contig, 'all')
elif strategy == 'mapped_mates_of_unmapped':
return mapped_mates_of_unmapped_reads(contig)
else:
raise Exception(str(strategy) + ' not a recognized strategy')
contig_reads = _get_contig_reads_using_strategy(strategy)
if len(contig_reads) == 0:
return {'error_string':
'No clipped reads were assembled into the contig'}
contig_reads_dataset_exists = bool(
contig.dataset_set.filter(
type=Dataset.TYPE.BWA_SV_INDICANTS).count())
if strategy == 'all_reads' and not contig_reads_dataset_exists:
make_contig_reads_dataset(contig, contig_reads)
# Add bam track
add_contig_reads_bam_track(contig, Dataset.TYPE.BWA_SV_INDICANTS)
# Align extracted reads to contig, check if assembled as reverse
# complement relative to the reference
maybe_create_reads_to_contig_bam(contig)
reads_to_contig_bam = contig.dataset_set.get(
type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()
reads_to_contig_dict = dictify(pysam.AlignmentFile(reads_to_contig_bam))
reads_to_ref_dict = dictify(contig_reads)
is_reverse = is_contig_reverse_complement(reads_to_ref_dict,
reads_to_contig_dict)
contig.metadata['is_reverse'] = is_reverse
if is_reverse:
write_contig_reverse_complement(contig)
extracted_clipped_read_dicts = extract_left_and_right_clipped_read_dicts(
contig_reads)
left_clipped = extracted_clipped_read_dicts['left_clipped']
right_clipped = extracted_clipped_read_dicts['right_clipped']
# Right clipped reads indicate left endpoint
left_ref_endpoints = get_top_clipped_locs(right_clipped)
# Left clipped reads indicate right endpoint
right_ref_endpoints = get_top_clipped_locs(left_clipped)
left_junctions = []
for ref_endpoint, ref_count in left_ref_endpoints:
contig_endpoint, contig_count = find_contig_endpoint(
contig, right_clipped[ref_endpoint], 'right')
left_junctions.append(Junction(
ref_endpoint, ref_count, contig_endpoint, contig_count))
right_junctions = []
for ref_endpoint, ref_count in right_ref_endpoints:
contig_endpoint, contig_count = find_contig_endpoint(
contig, left_clipped[ref_endpoint], 'left')
right_junctions.append(Junction(
ref_endpoint, ref_count, contig_endpoint, contig_count))
contig.metadata['left_junctions'] = left_junctions
contig.metadata['right_junctions'] = right_junctions
contig.metadata['potential_reference_endpoints'] = {
'left': left_ref_endpoints,
'right': right_ref_endpoints
}
contig.save()
ref_insertion_endpoints = {}
if are_ref_endpoints_placeable(left_ref_endpoints):
ref_insertion_endpoints['left'] = left_ref_endpoints[0][0]
else:
ref_insertion_endpoints['left'] = None
if are_ref_endpoints_placeable(right_ref_endpoints):
ref_insertion_endpoints['right'] = right_ref_endpoints[0][0]
else:
ref_insertion_endpoints['right'] = None
# Handle case of no endpoints found
error = None
if (not ref_insertion_endpoints['left'] and
not ref_insertion_endpoints['right']):
error = {'error_string': ('Could not find left or right reference ' +
'insertion endpoints using ' + str(len(contig_reads)) +
' clipped reads')}
elif not ref_insertion_endpoints['left']:
error = {'error_string': ('Could not find left reference ' +
'insertion endpoint using ' + str(len(contig_reads)) +
' clipped reads')}
elif not ref_insertion_endpoints['right']:
error = {'error_string': ('Could not find right reference ' +
'insertion endpoint using ' + str(len(contig_reads)) +
' clipped reads')}
elif (ref_insertion_endpoints['left'] - ref_insertion_endpoints['right'] >
0.5 * contig.num_bases):
error = {'error_string': ('Left insertion endpoint found too far ' +
'before right insertion endpoint')}
elif (ref_insertion_endpoints['right'] - ref_insertion_endpoints['left'] >
10 * contig.num_bases):
error = {'error_string': ('Distance between left and right ' +
'reference insertion endpoints more than 10x contig' +
'length')}
if error:
return error
left_clipped_same_end = left_clipped[ref_insertion_endpoints['right']]
right_clipped_same_end = right_clipped[ref_insertion_endpoints['left']]
contig_insertion_endpoints = find_contig_insertion_endpoints(
contig, left_clipped_same_end,
right_clipped_same_end)
    # Propagate error upwards
if 'error_string' in contig_insertion_endpoints:
return contig_insertion_endpoints
if contig_insertion_endpoints['left'] is None:
return {'error_string': ('Could not find left contig endpoint')}
if contig_insertion_endpoints['right'] is None:
return {'error_string': ('Could not find right contig endpoint')}
# Set contig metadata fields and return endpoints
insertion_placement_positions = {
'reference': ref_insertion_endpoints,
'contig': contig_insertion_endpoints
}
contig.metadata['contig_insertion_endpoints'] = (
insertion_placement_positions['contig']['left'],
insertion_placement_positions['contig']['right'])
contig.metadata['reference_insertion_endpoints'] = (
insertion_placement_positions['reference']['left'],
insertion_placement_positions['reference']['right'])
contig.save()
return insertion_placement_positions
def mapped_mates_of_unmapped_reads(contig):
unmapped_contig_reads = extract_contig_reads(
contig, read_category='unmapped')
print len(unmapped_contig_reads), 'unmapped reads in contig'
original_align = contig.experiment_sample_to_alignment.dataset_set.get(
type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()
original_alignmentfile = pysam.AlignmentFile(original_align)
found_mates = []
for read in unmapped_contig_reads:
if not read.mate_is_unmapped:
mate = original_alignmentfile.mate(read)
found_mates.append(mate)
original_alignmentfile.close()
print len(found_mates), 'mapped mates found'
return found_mates
def dictify(reads_iterator):
id_to_reads = defaultdict(list)
for read in reads_iterator:
id_to_reads[read.qname].append(read)
return id_to_reads
def only_primary(reads):
return [read for read in reads if not
(read.is_supplementary or read.is_secondary)]
def is_contig_reverse_complement(reads_to_ref_dict, reads_to_contig_dict):
direction_agreement = 0
direction_disagreement = 0
for qname, reads in reads_to_ref_dict.items():
reads = only_primary(reads)
if all([read.is_unmapped for read in reads]):
continue
same_reads_to_contig = only_primary(
reads_to_contig_dict[reads[0].qname])
for read in reads:
if read.is_unmapped:
continue
if read.is_read1:
correspondant = next((read for read in same_reads_to_contig
if read.is_read1), None)
else:
correspondant = next((read for read in same_reads_to_contig
if read.is_read2), None)
if correspondant:
if read.is_reverse == correspondant.is_reverse:
direction_agreement += 1
else:
direction_disagreement += 1
if not (direction_agreement or direction_disagreement):
return False
return (direction_disagreement / (direction_disagreement +
direction_agreement) > REVERSED_COMPLEMENTARITY_FRACTION_CUTOFF)
def extract_contig_reads(contig, read_category='all'):
READ_CATEGORY_TO_FILENAME_DICT = {
'without_mates': 'bwa_align.SV_indicants_no_dups.bam',
'clipped': 'bwa_align.clipped.bam',
'split': 'bwa_align.split.bam',
'unmapped': 'bwa_align.unmapped.bam'
}
def _read_category_to_filename(read_category):
if read_category in READ_CATEGORY_TO_FILENAME_DICT:
return READ_CATEGORY_TO_FILENAME_DICT[read_category]
elif read_category == 'all':
assembly_metadata_file = os.path.join(
contig.metadata['assembly_dir'],
'metadata.txt')
with open(assembly_metadata_file) as fh:
assembly_metadata_obj = pickle.load(fh)
return assembly_metadata_obj['sv_indicants_bam']
elif read_category == 'mates_of_unmapped':
return mapped_mates_of_unmapped_reads(contig)
else:
raise Exception('read category not recognized')
extract_contig_reads_executable = os.path.join(
settings.TOOLS_DIR,
'velvet/extractContigReads.pl')
assembly_dir = contig.metadata['assembly_dir']
contig_node_number = contig.metadata['node_number']
cmd = [extract_contig_reads_executable, str(contig_node_number),
assembly_dir]
cmd = ' '.join(cmd)
contig_reads_fasta = os.path.join(
contig.get_model_data_dir(),
'extracted_reads.fa')
if not os.path.exists(contig_reads_fasta):
with open(contig_reads_fasta, 'w') as fh:
subprocess.call(cmd, shell=True, stdout=fh)
p1 = re.compile('>(\S+)/(\d)')
contig_reads = defaultdict(list)
with open(contig_reads_fasta) as fh:
for line in fh:
m1 = p1.match(line)
if m1:
read_id = m1.group(1)
read_number = int(m1.group(2))
contig_reads[read_id].append(read_number)
sv_indicant_reads_path = os.path.join(
contig.experiment_sample_to_alignment.get_model_data_dir(),
_read_category_to_filename(read_category))
sam_file = pysam.AlignmentFile(sv_indicant_reads_path)
sv_indicant_reads_in_contig = []
for read in sam_file:
if read.is_read1:
read_number = 1
elif read.is_read2:
read_number = 2
else:
raise Exception('Read is neither read1 nor read2')
contig_read_numbers = contig_reads.get(read.query_name, [])
if read_number in contig_read_numbers:
sv_indicant_reads_in_contig.append(read)
# HACK: Set chromosome here while sam file is open
# so AlignmentFile.getrname(tid) can be called
ref_id_to_count = {}
mapped_count = 0
for read in sv_indicant_reads_in_contig:
if not read.is_unmapped:
mapped_count += 1
if read.reference_id not in ref_id_to_count:
ref_id_to_count[read.reference_id] = 1
else:
ref_id_to_count[read.reference_id] += 1
if mapped_count:
tid_count_sorted = sorted(
ref_id_to_count.items(), key=lambda x: x[1], reverse=True)
mode_chrom_tid = tid_count_sorted[0][0]
mode_chrom_percentage = (tid_count_sorted[0][1] /
float(mapped_count))
# Set field
if mode_chrom_percentage > 0.8:
contig_seqrecord_id = sam_file.getrname(mode_chrom_tid)
contig.metadata['chromosome'] = contig_seqrecord_id
contig.save()
sam_file.close()
return sv_indicant_reads_in_contig
def make_contig_reads_dataset(contig, sv_indicant_reads_in_contig):
# Get bam filename
extracted_reads_bam_file = os.path.join(
contig.get_model_data_dir(),
'sv_indicants.bam')
bwa_align_bam = contig.experiment_sample_to_alignment.dataset_set.get(
type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()
sam_file = pysam.AlignmentFile(bwa_align_bam)
# Write extracted reads into bam file
extracted_reads_alignment_file = pysam.AlignmentFile(
extracted_reads_bam_file, "wb", template=sam_file)
sam_file.close()
for read in sv_indicant_reads_in_contig:
extracted_reads_alignment_file.write(read)
extracted_reads_alignment_file.close()
coordinate_sorted_bam = (os.path.splitext(extracted_reads_bam_file)[0] +
'.coordinate_sorted.bam')
sort_bam_by_coordinate(extracted_reads_bam_file, coordinate_sorted_bam)
index_bam(coordinate_sorted_bam)
# Add the bam file to contig as BWA_SV_INDICANTS dataset, overwriting it
# if it already exists
dataset_query = contig.dataset_set.filter(
type=Dataset.TYPE.BWA_SV_INDICANTS)
if dataset_query.count():
dataset_query[0].delete()
add_dataset_to_entity(contig,
Dataset.TYPE.BWA_SV_INDICANTS,
Dataset.TYPE.BWA_SV_INDICANTS,
filesystem_location=coordinate_sorted_bam)
def extract_left_and_right_clipped_read_dicts(sv_indicant_reads_in_contig,
clipping_threshold=0):
SOFT_CLIP = 4
HARD_CLIP = 5
CLIP = [SOFT_CLIP, HARD_CLIP]
# Separate left and right clipped reads
left_clipped = defaultdict(list)
right_clipped = defaultdict(list)
for read in sv_indicant_reads_in_contig:
if read.cigartuples is not None:
left_clipping = (read.cigartuples[0][1]
if read.cigartuples[0][0] in CLIP else 0)
right_clipping = (read.cigartuples[-1][1]
if read.cigartuples[-1][0] in CLIP else 0)
if max(left_clipping, right_clipping) > clipping_threshold:
is_left_clipped = left_clipping > right_clipping
is_right_clipped = right_clipping > left_clipping
if is_left_clipped:
left_clipped[read.reference_start].append(read)
elif is_right_clipped:
right_clipped[read.reference_end].append(read)
return {
'left_clipped': left_clipped,
'right_clipped': right_clipped
}
def are_ref_endpoints_placeable(endpoints):
"""endpoints is a list of tuples of the form
(loc, clipped_read_count) sorted by decreasing clipped_key_count
"""
first = endpoints[0][1] if len(endpoints) > 0 else 0
second = endpoints[1][1] if len(endpoints) > 1 else 0
if not first * (1 - ENDPOINT_MODE_DIFFERENCE_FACTOR_CUTOFF) > second:
return False
return True
def get_top_clipped_locs(clipped_dict):
"""clipped_dict is a dictionary with clipping locations as
keys and a list of reads as values
"""
# Convert the dictionary into a list of tuples of the form
# (loc, #reads) sorted in decreasing order of #reads
clipped_count_list = sorted(
[(loc, len(reads)) for loc, reads in clipped_dict.items()],
key=lambda t: t[1], reverse=True)
# Count up the total number of reads
total = sum(count for loc, count in clipped_count_list)
# Return the list that comprises ENDPOINT_FRACTION of the total reads
included = 0
i = 0
while included < ENDPOINT_FRACTION * total:
included += clipped_count_list[i][1]
i += 1
return clipped_count_list[:i]
def write_read_query_alignments_to_fastq(reads, fastq_path,
read_attr_class='query_alignment'):
"""Writes the aligned portion of each read into a fastq
"""
read_attr_funcs = {
'query_alignment': {
'seq': lambda x: x.query_alignment_sequence,
'qual': lambda x: x.query_alignment_qualities
},
'query': {
'seq': lambda x: x.query_sequence,
'qual': lambda x: x.query_qualities
}
}
assert read_attr_class in read_attr_funcs
get_read_attr = read_attr_funcs[read_attr_class]
query_alignment_seqrecords = []
for read in reads:
query_alignment_seqrecords.append(SeqRecord(
Seq(get_read_attr['seq'](read), IUPAC.ambiguous_dna),
letter_annotations={
'phred_quality': get_read_attr['qual'](read)},
id=read.query_name,
description=''))
with open(fastq_path, 'w') as fastq_handle:
SeqIO.write(query_alignment_seqrecords, fastq_handle, 'fastq')
def simple_align_with_bwa_mem(reads_fq, reference_fasta, output_bam_path):
# Assert reference fasta is indexed
assert has_bwa_index(reference_fasta)
# Align clipped query alignment fastq to contig
align_input_args = ' '.join([
'%s/bwa/bwa' % settings.TOOLS_DIR,
'mem',
reference_fasta,
reads_fq])
# Bwa mem calls reads clipped slightly at the end of the genome
# as unmapped, so filter these out with -F 0x004
# To skip saving the SAM file to disk directly, pipe output directly to
# make a BAM file.
align_input_args += (' | ' + settings.SAMTOOLS_BINARY +
' view -F 0x004 -bS -')
# Run alignment
with open(output_bam_path, 'w') as fh:
subprocess.check_call(
align_input_args, stdout=fh,
shell=True, executable=settings.BASH_PATH)
def get_reads_with_mode_attribute(clipped_alignment_bam, get_attr_function):
alignment_ref_clip_positions = defaultdict(list)
sam_file = pysam.AlignmentFile(clipped_alignment_bam)
for read in sam_file:
alignment_ref_clip_positions[get_attr_function(read)].append(read)
alignment_ref_clip_positions_sorted = sorted(
alignment_ref_clip_positions.items(),
key=lambda x: len(x[1]), reverse=True)
highest_consensus = (len(alignment_ref_clip_positions_sorted[0][1])
if len(alignment_ref_clip_positions_sorted) > 0 else 0)
second_highest_consensus = (len(alignment_ref_clip_positions_sorted[1][1])
if len(alignment_ref_clip_positions_sorted) > 1 else 0)
if (highest_consensus - second_highest_consensus >
(ENDPOINT_MODE_DIFFERENCE_FACTOR_CUTOFF *
highest_consensus)):
endpoint = (alignment_ref_clip_positions_sorted[0][0],
highest_consensus)
else:
endpoint = None, None
return endpoint
def get_contig_rc_fasta_path(contig):
contig_fasta = contig.dataset_set.get(
type=Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
return (os.path.splitext(contig_fasta)[0] +
'.reverse_complement.fa')
def write_contig_reverse_complement(contig):
contig_fasta = contig.dataset_set.get(
type=Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
rc_contig_fasta = get_contig_rc_fasta_path(contig)
contig_seqrecord = SeqIO.parse(contig_fasta, 'fasta').next()
contig_seqrecord.seq = contig_seqrecord.seq.reverse_complement()
SeqIO.write(contig_seqrecord, rc_contig_fasta, 'fasta')
return rc_contig_fasta
def find_contig_endpoint(contig, clipped_same_end, direction):
assert direction in ['left', 'right']
# Write clipped query alignment sequences to fastq
contig_dir = contig.get_model_data_dir()
clipped_query_alignment_fq = os.path.join(
contig_dir,
'clipped_query_alignment_seqs.fq')
write_read_query_alignments_to_fastq(
clipped_same_end,
clipped_query_alignment_fq)
# Get BAM filename for alignment
clipped_to_contig_bam = os.path.join(
contig_dir,
'clipped_to_contig.bwa_align.bam')
# Get contig fasta
contig_fasta = contig.dataset_set.get(
type=Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
if contig.is_reverse:
align_to = get_contig_rc_fasta_path(contig)
else:
align_to = contig_fasta
if align_to:
ensure_bwa_index(align_to)
simple_align_with_bwa_mem(
clipped_query_alignment_fq, align_to,
clipped_to_contig_bam)
# Find contig endpoints
if direction == 'right':
return get_reads_with_mode_attribute(
clipped_to_contig_bam, lambda r: r.reference_end)
else:
return get_reads_with_mode_attribute(
clipped_to_contig_bam, lambda r: r.reference_start)
def find_contig_insertion_endpoints(contig,
left_clipped_same_end, right_clipped_same_end):
""" left_clipped_same_end/right_clipped_same_end are lists of
left and right clipped reads all with the same left/right
alignment endpoint, corresponding to the reference insertion
right/left endpoint
"""
contig_ins_left_end, _ = find_contig_endpoint(contig,
right_clipped_same_end, 'right')
contig_ins_right_end, _ = find_contig_endpoint(contig,
left_clipped_same_end, 'left')
return {
'left': contig_ins_left_end,
'right': contig_ins_right_end
}
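# Illustrative check of the endpoint-selection helpers on toy data; real callers
# pass pysam reads, but only list lengths and counts matter here, so plain
# strings stand in for reads:
#
#     toy_clipped = {100: ['r1', 'r2', 'r3', 'r4'], 250: ['r5']}
#     get_top_clipped_locs(toy_clipped)                    # -> [(100, 4)]
#     are_ref_endpoints_placeable([(100, 4), (250, 1)])    # -> True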
|
mit
| -2,248,555,978,764,441,900
| 35.500829
| 80
| 0.636256
| false
| 3.63922
| false
| false
| false
|
thinkle/gourmet
|
gourmet/plugins/key_editor/keyEditorPluggable.py
|
1
|
3327
|
# This library provides a pluggable that lets plugins that *use* our
# key editor provide extra information based on the ingredient
# key. This will be used to show info in both the key editor and
# recipe card view and possibly to allow editing etc.
from gourmet.plugin_loader import Pluggable
from gourmet.plugin import PluginPlugin
from gourmet import gdebug
# Here's our template -- those implementing will have to take this as
# boilerplate code rather than subclassing it, since it's not possible
# to reliably access one plugin's module from another.
# Begin boilerplate...
#
# For a fuller example, see shopping_associations
class KeyEditorPlugin (PluginPlugin):
target_pluggable = 'KeyEditorPlugin'
selected_ingkeys = []
def setup_treeview_column (self, ike, key_col, instant_apply=False):
'''Set up a treeview column to display your data.
The key_col is the column in the treemodel which will contain
your data in the model. It\'s your responsibility to get
whatever other data you need yourself.
If you make this editable, it\'s up to you to apply the
changes as well to the database. If instant_apply is True,
then apply them instantly; if False, apply them when this
class\'s save method is called.
'''
raise NotImplementedError
def save (self):
'''Save any data the user has entered in your treeview column.
'''
pass
def offers_edit_widget (self):
'''Return True if this plugin provides an edit button for
editing data (if you need more than an editable cellrenderer
to let users edit your data, or would like to act on multiple
        rows).
'''
return False
def setup_edit_widget (self):
'''Return an edit button to let users edit your data.
'''
raise NotImplementedError
def selection_changed (self, ingkeys):
'''Selected ingkeys have changed -- currently ingkeys are
        selected (and should be acted on by our edit_widget).
'''
self.selected_ingkeys = ingkeys
# End boilerplate
class KeyEditorPluginManager (Pluggable):
'''Manage plugins that provide users the ability to edit extra
associations, such as nutritional information, shopping list
categories, etc.'''
title = 'Title of Whatever we Do'
targets = ['KeyEditorPlugin']
__single = None
@classmethod
def instance(cls):
if KeyEditorPluginManager.__single is None:
KeyEditorPluginManager.__single = cls()
return KeyEditorPluginManager.__single
def __init__ (self):
Pluggable.__init__(self,[PluginPlugin])
def get_treeview_columns (self, ike, key_col, instant_apply=False):
return [p.setup_treeview_column(ike, key_col,instant_apply) for p in self.plugins]
def get_edit_buttons (self, ike):
buttons = []
for p in self.plugins:
            if p.offers_edit_widget():
                try:
                    buttons.append(p.setup_edit_widget())
                except:
                    print('Trouble initializing edit widget for plugin', p)
import traceback; traceback.print_exc()
return buttons
def get_key_editor_plugin_manager ():
return KeyEditorPluginManager.instance()
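# Example plugin (illustrative sketch following the boilerplate above; the
# column construction is left to the implementer and shown only as a placeholder):
#
#     class MyKeyInfoPlugin (KeyEditorPlugin):
#         def setup_treeview_column (self, ike, key_col, instant_apply=False):
#             # Build and return a treeview column whose renderer reads your
#             # data from key_col in the treemodel.
#             ...
#         def save (self):
#             pass
#
# The key editor then collects the extra columns through the manager:
#
#     get_key_editor_plugin_manager().get_treeview_columns(ike, key_col)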
|
gpl-2.0
| -7,379,759,485,367,620,000
| 32.606061
| 90
| 0.661858
| false
| 4.320779
| false
| false
| false
|
SetBased/py-etlt
|
etlt/dimension/Type2ReferenceDimension.py
|
1
|
5466
|
"""
ETLT
Copyright 2016 Set Based IT Consultancy
Licence MIT
"""
import abc
import datetime
class Type2ReferenceDimension(metaclass=abc.ABCMeta):
"""
Abstract class for type2 dimensions for which the reference data is supplied with date intervals.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self):
"""
Object constructor.
"""
self._key_key = ''
"""
The key in the dict returned by call_stored_procedure holding the technical ID.
:type: str
"""
self._key_date_start = ''
"""
The key in the dict returned by call_stored_procedure holding the start date.
:type: str
"""
self._key_date_end = ''
"""
The key in the dict returned by call_stored_procedure holding the end date.
:type: str
"""
self._map = {}
"""
The map from natural keys to lists of tuples with start date, end date, and technical keys. The dates must be in
ISO 8601 (YYYY-MM-DD) format.
:type: dict[T, list[(str,str,int|None)]]
"""
# Pre-load look up data in to the map.
self.pre_load_data()
# ------------------------------------------------------------------------------------------------------------------
def get_id(self, natural_key, date, enhancement=None):
"""
Returns the technical ID for a natural key at a date or None if the given natural key is not valid.
:param T natural_key: The natural key.
:param str date: The date in ISO 8601 (YYYY-MM-DD) format.
:param T enhancement: Enhancement data of the dimension row.
:rtype: int|None
"""
if not date:
return None
# If the natural key is known return the technical ID immediately.
if natural_key in self._map:
for row in self._map[natural_key]:
if row[0] <= date <= row[1]:
return row[2]
# The natural key is not in the map of this dimension. Call a stored procedure for translating the natural key
# to a technical key.
self.pre_call_stored_procedure()
success = False
try:
row = self.call_stored_procedure(natural_key, date, enhancement)
# Convert dates to strings in ISO 8601 format.
if isinstance(row[self._key_date_start], datetime.date):
row[self._key_date_start] = row[self._key_date_start].isoformat()
if isinstance(row[self._key_date_end], datetime.date):
row[self._key_date_end] = row[self._key_date_end].isoformat()
success = True
finally:
self.post_call_stored_procedure(success)
# Make sure the natural key is in the map.
if natural_key not in self._map:
self._map[natural_key] = []
if row[self._key_key]:
self._map[natural_key].append((row[self._key_date_start],
row[self._key_date_end],
row[self._key_key]))
else:
self._map[natural_key].append((date, date, None))
return row[self._key_key]
# ------------------------------------------------------------------------------------------------------------------
@abc.abstractmethod
def call_stored_procedure(self, natural_key, date, enhancement):
"""
Call a stored procedure for getting the technical key of a natural key at a date. Returns the technical ID or
None if the given natural key is not valid.
:param T natural_key: The natural key.
:param str date: The date in ISO 8601 (YYYY-MM-DD) format.
:param T enhancement: Enhancement data of the dimension row.
:rtype: dict
"""
raise NotImplementedError()
# ------------------------------------------------------------------------------------------------------------------
def pre_load_data(self):
"""
Can be overridden to pre-load lookup data from a dimension table.
:rtype: None
"""
pass
# ------------------------------------------------------------------------------------------------------------------
def pre_call_stored_procedure(self):
"""
        This method is invoked before calling the stored procedure for getting the technical key of a natural key.
In a concurrent environment override this method to acquire a lock on the dimension or dimension hierarchy.
:rtype: None
"""
pass
# ------------------------------------------------------------------------------------------------------------------
def post_call_stored_procedure(self, success):
"""
This method is invoked after calling the stored procedure for getting the technical key of a natural key.
In a concurrent environment override this method to release a lock on the dimension or dimension hierarchy and
to commit or rollback the transaction.
:param bool success: True: the stored procedure is executed successfully. False: an exception has occurred.
:rtype: None
"""
pass
# ----------------------------------------------------------------------------------------------------------------------
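# Illustrative subclass sketch showing how the abstract hooks fit together; the
# hard-coded row below stands in for a real stored procedure call and the column
# names are placeholders:
#
#     class CountryDimension(Type2ReferenceDimension):
#         def __init__(self):
#             super().__init__()
#             self._key_key = 'cnt_id'
#             self._key_date_start = 'cnt_date_start'
#             self._key_date_end = 'cnt_date_end'
#
#         def call_stored_procedure(self, natural_key, date, enhancement):
#             return {'cnt_id': 1,
#                     'cnt_date_start': '1900-01-01',
#                     'cnt_date_end': '9999-12-31'}
#
#     dim = CountryDimension()
#     dim.get_id('NL', '2016-06-01')   # -> 1, and the lookup is cached in self._map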
|
mit
| -5,448,622,367,667,991,000
| 34.960526
| 120
| 0.497988
| false
| 4.973612
| false
| false
| false
|
ArcherSys/ArcherSys
|
skulpt/src/lib/pythonds/trees/bst.py
|
1
|
8740
|
#!/bin/env python3.1
# Bradley N. Miller, David L. Ranum
# Introduction to Data Structures and Algorithms in Python
# Copyright 2005, 2010
#
class BinarySearchTree:
'''
Author: Brad Miller
Date: 1/15/2005
    Description: Implement a binary search tree with the following interface
functions:
__contains__(y) <==> y in x
__getitem__(y) <==> x[y]
__init__()
__len__() <==> len(x)
__setitem__(k,v) <==> x[k] = v
clear()
get(k)
items()
keys()
values()
put(k,v)
in
del <==>
'''
def __init__(self):
self.root = None
self.size = 0
def put(self,key,val):
if self.root:
self._put(key,val,self.root)
else:
self.root = TreeNode(key,val)
self.size = self.size + 1
def _put(self,key,val,currentNode):
if key < currentNode.key:
if currentNode.hasLeftChild():
self._put(key,val,currentNode.leftChild)
else:
currentNode.leftChild = TreeNode(key,val,parent=currentNode)
else:
if currentNode.hasRightChild():
self._put(key,val,currentNode.rightChild)
else:
currentNode.rightChild = TreeNode(key,val,parent=currentNode)
def __setitem__(self,k,v):
self.put(k,v)
def get(self,key):
if self.root:
res = self._get(key,self.root)
if res:
return res.payload
else:
return None
else:
return None
def _get(self,key,currentNode):
if not currentNode:
return None
elif currentNode.key == key:
return currentNode
elif key < currentNode.key:
return self._get(key,currentNode.leftChild)
else:
return self._get(key,currentNode.rightChild)
def __getitem__(self,key):
res = self.get(key)
if res:
return res
else:
raise KeyError('Error, key not in tree')
def __contains__(self,key):
if self._get(key,self.root):
return True
else:
return False
def length(self):
return self.size
def __len__(self):
return self.size
def __iter__(self):
return self.root.__iter__()
def delete(self,key):
if self.size > 1:
nodeToRemove = self._get(key,self.root)
if nodeToRemove:
self.remove(nodeToRemove)
self.size = self.size-1
else:
raise KeyError('Error, key not in tree')
elif self.size == 1 and self.root.key == key:
self.root = None
self.size = self.size - 1
else:
raise KeyError('Error, key not in tree')
def __delitem__(self,key):
self.delete(key)
def remove(self,currentNode):
if currentNode.isLeaf(): #leaf
if currentNode == currentNode.parent.leftChild:
currentNode.parent.leftChild = None
else:
currentNode.parent.rightChild = None
elif currentNode.hasBothChildren(): #interior
succ = currentNode.findSuccessor()
succ.spliceOut()
currentNode.key = succ.key
currentNode.payload = succ.payload
else: # this node has one child
if currentNode.hasLeftChild():
if currentNode.isLeftChild():
currentNode.leftChild.parent = currentNode.parent
currentNode.parent.leftChild = currentNode.leftChild
elif currentNode.isRightChild():
currentNode.leftChild.parent = currentNode.parent
currentNode.parent.rightChild = currentNode.leftChild
else:
currentNode.replaceNodeData(currentNode.leftChild.key,
currentNode.leftChild.payload,
currentNode.leftChild.leftChild,
currentNode.leftChild.rightChild)
else:
if currentNode.isLeftChild():
currentNode.rightChild.parent = currentNode.parent
currentNode.parent.leftChild = currentNode.rightChild
elif currentNode.isRightChild():
currentNode.rightChild.parent = currentNode.parent
currentNode.parent.rightChild = currentNode.rightChild
else:
currentNode.replaceNodeData(currentNode.rightChild.key,
currentNode.rightChild.payload,
currentNode.rightChild.leftChild,
currentNode.rightChild.rightChild)
def inorder(self):
self._inorder(self.root)
def _inorder(self,tree):
if tree != None:
self._inorder(tree.leftChild)
print(tree.key)
self._inorder(tree.rightChild)
def postorder(self):
self._postorder(self.root)
def _postorder(self, tree):
if tree:
self._postorder(tree.rightChild)
self._postorder(tree.leftChild)
print(tree.key)
def preorder(self):
        self._preorder(self.root)
def _preorder(self,tree):
if tree:
print(tree.key)
self._preorder(tree.leftChild)
self._preorder(tree.rightChild)
class TreeNode:
def __init__(self,key,val,left=None,right=None,parent=None):
self.key = key
self.payload = val
self.leftChild = left
self.rightChild = right
self.parent = parent
self.balanceFactor = 0
def hasLeftChild(self):
return self.leftChild
def hasRightChild(self):
return self.rightChild
def isLeftChild(self):
return self.parent and self.parent.leftChild == self
def isRightChild(self):
return self.parent and self.parent.rightChild == self
def isRoot(self):
return not self.parent
def isLeaf(self):
return not (self.rightChild or self.leftChild)
def hasAnyChildren(self):
return self.rightChild or self.leftChild
def hasBothChildren(self):
return self.rightChild and self.leftChild
def replaceNodeData(self,key,value,lc,rc):
self.key = key
self.payload = value
self.leftChild = lc
self.rightChild = rc
if self.hasLeftChild():
self.leftChild.parent = self
if self.hasRightChild():
self.rightChild.parent = self
def findSuccessor(self):
succ = None
if self.hasRightChild():
succ = self.rightChild.findMin()
else:
if self.parent:
if self.isLeftChild():
succ = self.parent
else:
self.parent.rightChild = None
succ = self.parent.findSuccessor()
self.parent.rightChild = self
return succ
def spliceOut(self):
if self.isLeaf():
if self.isLeftChild():
self.parent.leftChild = None
else:
self.parent.rightChild = None
elif self.hasAnyChildren():
if self.hasLeftChild():
if self.isLeftChild():
self.parent.leftChild = self.leftChild
else:
self.parent.rightChild = self.leftChild
self.leftChild.parent = self.parent
else:
if self.isLeftChild():
self.parent.leftChild = self.rightChild
else:
self.parent.rightChild = self.rightChild
self.rightChild.parent = self.parent
def findMin(self):
current = self
while current.hasLeftChild():
current = current.leftChild
return current
def __iter__(self):
"""The standard inorder traversal of a binary tree."""
if self:
if self.hasLeftChild():
for elem in self.leftChild:
yield elem
yield self.key
if self.hasRightChild():
for elem in self.rightChild:
yield elem
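# Short usage example (illustrative; exercises the interface listed in the
# class docstring):
#
#     tree = BinarySearchTree()
#     tree[17] = 'red'
#     tree[5] = 'blue'
#     tree[35] = 'yellow'
#     print(tree[5])                  # 'blue'
#     print(5 in tree, len(tree))     # True 3
#     del tree[17]
#     print([key for key in tree])    # inorder traversal: [5, 35]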
|
mit
| 2,014,519,436,415,015,200
| 30.781818
| 77
| 0.515904
| false
| 4.516796
| false
| false
| false
|
vlegoff/tsunami
|
src/primaires/scripting/actions/desequiper.py
|
1
|
3400
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action desequiper."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
from primaires.objet.conteneur import SurPoids
class ClasseAction(Action):
"""Fait déséquiper un personnage."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.desequiper_objet, "Personnage", "Objet")
@staticmethod
def desequiper_objet(personnage, objet):
"""Force un personnage à déséquiper l'objet précisé.
Cette syntaxe de l'action se rapproche davantage de la commande
**retirer/remove**. Elle demande à un personnage de déséquiper un
objet qu'il équipe. L'objet est ensuite placé dans l'inventaire
du personnage, ou sur le sol si ce n'est pas possible.
Paramètres à préciser :
* personnage : le personnage que l'on souhaite déséquiper
* objet : l'objet que l'on souhaite déséquiper.
Exemple d'utilisation :
sabre = equipe(personnage, "sabre_bois")
desequiper personnage sabre
"""
if objet.contenu is not personnage.equipement.equipes:
raise ErreurExecution("{} n'équipe pas {}".format(
personnage.nom_unique, objet.identifiant))
        # Try to unequip the object
try:
personnage.equipement.equipes.retirer(objet)
except ValueError:
raise ErreurExecution("{} ne peut retirer {}".format(
personnage.nom_unique, objet.identifiant))
else:
try:
personnage.ramasser(objet=objet)
except SurPoids:
personnage.equipement.tenir_objet(objet=objet)
objet.script["retire"].executer(objet=objet,
personnage=personnage)
|
bsd-3-clause
| 675,812,069,121,602,700
| 40.195122
| 79
| 0.706927
| false
| 3.667752
| false
| false
| false
|
chrys87/fenrir
|
play zone/detectDevices.py
|
1
|
1911
|
#!/bin/python
import evdev
iDevices = {}
iDeviceNo = 0
def updateInputDevices(force = False, init = False):
    global iDevices, iDeviceNo
if init:
iDevices = {}
iDeviceNo = 0
deviceFileList = evdev.list_devices()
if not force:
if len(deviceFileList) == iDeviceNo:
return
iDeviceNo = len(deviceFileList)
mode = 'ALL'
iDevicesFiles = []
for device in iDevices:
iDevicesFiles.append(iDevices[device].fn)
print(len(iDevicesFiles),len(deviceFileList))
if len(iDevicesFiles) == len(deviceFileList):
return
for deviceFile in deviceFileList:
try:
if deviceFile in iDevicesFiles:
print('skip')
continue
open(deviceFile)
# 3 pos absolute
# 2 pos relative
# 1 Keys
currDevice = evdev.InputDevice(deviceFile)
cap = currDevice.capabilities()
if mode in ['ALL','NOMICE']:
if 1 in cap:
if 116 in cap[1] and len(cap[1]) < 5:
print('power')
continue
if mode == 'ALL':
iDevices[currDevice.fd] = currDevice
print('Device added:' + iDevices[currDevice.fd].name)
elif mode == 'NOMICE':
if not ((2 in cap) or (3 in cap)):
iDevices[currDevice.fd] = currDevice
print('Device added:' + iDevices[currDevice.fd].name)
elif currDevice.name.upper() in mode.split(','):
iDevices[currDevice.fd] = currDevice
print('Device added:' + iDevices[currDevice.fd].name)
except Exception as e:
print("Skip Inputdevice : " + deviceFile +' ' + str(e))
updateInputDevices()
|
lgpl-3.0
| -7,624,252,007,267,087,000
| 37.22
| 88
| 0.508111
| false
| 4.372998
| false
| false
| false
|
rchatterjee/nocrack
|
newcode/honeyvault_config.py
|
1
|
3284
|
# The following dictionaries should be provided to buildcfg.py
# 1: base dictionary //only character words will be considered
# 2: tweak set file
# 3: dictionary with count // PCFG will be built over this
# 4: output PCFG file name/path
# 5: output Trie file name/path
# empty lines and lines beginning with '#' will be discarded
# the exact dictionary path should be given.
import math
import os
import random
DEBUG = os.environ.get("DEBUG", False)
BASE_DIR = os.getcwd()
thisdir = os.path.dirname(os.path.abspath(__file__))
# DIC_TRIE_FILE = 'data/english.tri'
# DICTIONARY_DAWG = '{}/Dictionary_Store/dictionary1.1.dawg.gz'.format(thisdir)
# STANDARD_DIC_FILE = "{}/Dictionary_Store/standard_english.tri.gz".format(thisdir)
# GRAMMAR_OUTPUT_FILE = "{}/data/combined.gmr.bz2".format(thisdir)
# GRAMMAR_INPUT_FILE = "{}/data/combined.tri.bz2".format(thisdir)
# HANDGRAMMAR_FILE = "{}/data/grammar.txt".format(thisdir)
STATIC_DIR = os.path.join(thisdir, 'static')
TRAINED_GRAMMAR_FILE = os.path.join(STATIC_DIR, 'grammar.cfg.gz')
if DEBUG:
TRAINED_GRAMMAR_FILE += '~orig'
VAULT_DIST_FILE = os.path.join(STATIC_DIR, 'vault_dist.cfg')
# Don't change
EPSILON = '|_|'
GRAMMAR_R = 0
MEMLIMMIT = 1024 # 1024 MB, 1GB
MIN_COUNT = 2
PRODUCTION = 1
NONTERMINAL = 1
TERMINAL = 1 - NONTERMINAL
REPR_SIZE = 4 # number of bytes to represent an integer. normally 4 bytes. But
# we might go for higher values for better security.
MAX_INT = 256 ** REPR_SIZE # value of maximum integer in this representation.
PASSWORD_LENGTH = 100 # length of the password encoding
HONEY_VAULT_GRAMMAR_SIZE = 500 # 400 bytes, 50 integers/rules
# This controls the size of the NoCrack vault. Refer to the Oakland 15 paper
# (NoCrack) for more details. If you change this remember to delete
# static/vault.db to see the effect. Need less to say, you will lose all your
# passwords. Export/import operation are on its way. (TODO: Import-Export
# functions)
HONEY_VAULT_S1 = 1000
HONEY_VAULT_S2 = 1000
HONEY_VAULT_STORAGE_SIZE = HONEY_VAULT_S1 + HONEY_VAULT_S2
# For each password there is 1 byte saying whether the password is m/c or human
# generated. '1' --> m/c or '0' --> human generated pw.
# TODO: move it to more succinct repr, Google's protobuf!
HONEY_VAULT_MACHINE_PASS_SET_SIZE = int(math.ceil(HONEY_VAULT_STORAGE_SIZE / 8))
HONEY_VAULT_ENCODING_SIZE = HONEY_VAULT_GRAMMAR_SIZE + \
HONEY_VAULT_STORAGE_SIZE * PASSWORD_LENGTH
HONEY_VAULT_TOTAL_CIPHER_SIZE = HONEY_VAULT_ENCODING_SIZE + \
int(math.ceil(HONEY_VAULT_MACHINE_PASS_SET_SIZE / 4)) + \
8 # PBKDF1 salt size
SECURITY_PARAM = 16
SECURITY_PARAM_IN_BASE64 = (SECURITY_PARAM * 4) / 3 + 1
# Static domain mapping list
STATIC_DOMAIN_LIST = '{}/server/static_domain_map.txt'.format(thisdir)
STATIC_DOMAIN_HASH_LIST = '{}/static/static_domain_hashes.txt'.format(thisdir)
# Machine-generated password probability in a set of 1000
MACHINE_GENRATED_PASS_PROB = 10
# Required by honey_client
HONEY_SERVER_URL = "http://localhost:5000/"
VAULT_FILE = 'static/vault.db'
L33T = {
'3': 'e', '4': 'a', '@': 'a',
'$': 's', '0': 'o', '1': 'i',
'z': 's'
}
if DEBUG:
random.seed(123456)
else:
random.seed(os.urandom(4))
|
mit
| -7,326,233,119,189,157,000
| 32.510204
| 89
| 0.68849
| false
| 2.945291
| false
| false
| false
|
andrewyoung1991/abjad
|
abjad/tools/documentationtools/ReSTDirective.py
|
1
|
3357
|
# -*- encoding: utf-8 -*-
import abc
from abjad.tools.datastructuretools.TreeContainer import TreeContainer
class ReSTDirective(TreeContainer):
r'''A ReST directive.
'''
### INITIALIZER ###
def __init__(
self,
argument=None,
children=None,
directive=None,
name=None,
options=None,
):
TreeContainer.__init__(self, children=children, name=name)
assert isinstance(options, (dict, type(None)))
self._argument = argument
self._options = {}
if options is not None:
self._options.update(options)
self._directive = directive
### PRIVATE PROPERTIES ###
@property
def _children_rest_format_contributions(self):
result = []
for child in self.children:
result.append('')
contribution = child._rest_format_contributions
for x in contribution:
if x:
result.append(' ' + x)
else:
result.append(x)
return result
@property
def _rest_format_contributions(self):
if self.argument:
result = ['.. {}:: {}'.format(self.directive, self.argument)]
else:
result = ['.. {}::'.format(self.directive)]
for key, value in sorted(self.options.items()):
option = ' :{}:'.format(key)
if value is True:
pass
elif value is None or value is False:
continue
elif isinstance(value, (list, tuple)):
option += ' ' + ', '.join(str(x) for x in value)
elif isinstance(value, (int, float, str)):
option += ' ' + str(value)
result.append(option)
result.extend(self._children_rest_format_contributions)
return result
@property
def _storage_format_specification(self):
from abjad.tools import systemtools
return systemtools.StorageFormatSpecification(
self,
keywords_ignored_when_false=(
'children',
'name',
'options',
),
)
### PUBLIC PROPERTIES ###
@property
def argument(self):
r'''Gets and sets argument of ReST directive.
'''
return self._argument
@argument.setter
def argument(self, arg):
assert isinstance(arg, (str, type(None)))
self._argument = arg
@property
def directive(self):
r'''Gets and sets directive of ReST directive.
'''
return self._directive
@directive.setter
def directive(self, expr):
self._directive = str(expr)
@property
def node_class(self):
r'''Node class of ReST directive.
'''
from abjad.tools import documentationtools
return (
documentationtools.ReSTDirective,
documentationtools.ReSTHeading,
documentationtools.ReSTHorizontalRule,
documentationtools.ReSTParagraph,
)
@property
def options(self):
r'''Options of ReST directive.
'''
return self._options
@property
def rest_format(self):
r'''ReST format of ReST directive.
'''
return '\n'.join(self._rest_format_contributions)
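# Illustrative construction; the directive name and option below are placeholders:
#
#     from abjad.tools import documentationtools
#     note = documentationtools.ReSTDirective(
#         directive='note',
#         options={'class': 'tip'},
#         )
#     print(note.rest_format)
#     # -> ".. note::\n   :class: tip"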
|
gpl-3.0
| -1,021,700,211,659,350,400
| 26.983333
| 73
| 0.542746
| false
| 4.561141
| false
| false
| false
|
google/mirandum
|
alerts/streamtip/migrations/0002_migrate_updater.py
|
1
|
1372
|
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.db import models, migrations
def migrate_updater(apps, schema_editor):
StreamtipEvent = apps.get_model("streamtip", "StreamtipEvent")
UpdaterEvent = apps.get_model("main", "UpdaterEvent")
for event in StreamtipEvent.objects.all():
try:
ue = UpdaterEvent.objects.get(pk=event.updaterevent_ptr_id)
ue.base_updater = event.updater.updater_ptr
ue.save()
except Exception:
pass
class Migration(migrations.Migration):
dependencies = [
('main', '0009_updaterevent_base_updater'),
('streamtip', '0001_initial'),
]
operations = [
migrations.RunPython(migrate_updater)
]
|
apache-2.0
| -8,171,553,838,232,492,000
| 33.3
| 75
| 0.682216
| false
| 3.864789
| false
| false
| false
|
ninefold/libcloud
|
libcloud/compute/drivers/voxel.py
|
1
|
11150
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Voxel VoxCloud driver
"""
import datetime
import hashlib
from libcloud.utils.py3 import b
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
VOXEL_API_HOST = "api.voxel.net"
class VoxelResponse(XmlResponse):
def __init__(self, response, connection):
self.parsed = None
super(VoxelResponse, self).__init__(response=response,
connection=connection)
def parse_body(self):
if not self.body:
return None
if not self.parsed:
self.parsed = super(VoxelResponse, self).parse_body()
return self.parsed
def parse_error(self):
err_list = []
if not self.body:
return None
if not self.parsed:
self.parsed = super(VoxelResponse, self).parse_body()
for err in self.parsed.findall('err'):
code = err.get('code')
err_list.append("(%s) %s" % (code, err.get('msg')))
# From voxel docs:
# 1: Invalid login or password
# 9: Permission denied: user lacks access rights for this method
if code == "1" or code == "9":
# sucks, but only way to detect
# bad authentication tokens so far
raise InvalidCredsError(err_list[-1])
return "\n".join(err_list)
def success(self):
if not self.parsed:
self.parsed = super(VoxelResponse, self).parse_body()
stat = self.parsed.get('stat')
if stat != "ok":
return False
return True
class VoxelConnection(ConnectionUserAndKey):
"""
Connection class for the Voxel driver
"""
host = VOXEL_API_HOST
responseCls = VoxelResponse
def add_default_params(self, params):
params = dict([(k, v) for k, v in list(params.items())
if v is not None])
params["key"] = self.user_id
params["timestamp"] = datetime.datetime.utcnow().isoformat()+"+0000"
keys = list(params.keys())
keys.sort()
md5 = hashlib.md5()
md5.update(b(self.key))
for key in keys:
if params[key]:
if not params[key] is None:
md5.update(b("%s%s"% (key, params[key])))
else:
md5.update(b(key))
params['api_sig'] = md5.hexdigest()
return params
VOXEL_INSTANCE_TYPES = {}
RAM_PER_CPU = 2048
NODE_STATE_MAP = {
'IN_PROGRESS': NodeState.PENDING,
'QUEUED': NodeState.PENDING,
'SUCCEEDED': NodeState.RUNNING,
'shutting-down': NodeState.TERMINATED,
'terminated': NodeState.TERMINATED,
'unknown': NodeState.UNKNOWN,
}
class VoxelNodeDriver(NodeDriver):
"""
Voxel VoxCLOUD node driver
"""
connectionCls = VoxelConnection
type = Provider.VOXEL
name = 'Voxel VoxCLOUD'
website = 'http://www.voxel.net/'
def _initialize_instance_types():
for cpus in range(1,14):
if cpus == 1:
name = "Single CPU"
else:
name = "%d CPUs" % cpus
id = "%dcpu" % cpus
ram = cpus * RAM_PER_CPU
VOXEL_INSTANCE_TYPES[id]= {
'id': id,
'name': name,
'ram': ram,
'disk': None,
'bandwidth': None,
'price': None}
features = {"create_node": [],
"list_sizes": ["variable_disk"]}
_initialize_instance_types()
def list_nodes(self):
params = {"method": "voxel.devices.list"}
result = self.connection.request('/', params=params).object
return self._to_nodes(result)
def list_sizes(self, location=None):
return [ NodeSize(driver=self.connection.driver, **i)
for i in list(VOXEL_INSTANCE_TYPES.values()) ]
def list_images(self, location=None):
params = {"method": "voxel.images.list"}
result = self.connection.request('/', params=params).object
return self._to_images(result)
def create_node(self, **kwargs):
"""Create Voxel Node
@keyword name: the name to assign the node (mandatory)
@type name: C{str}
@keyword image: distribution to deploy
@type image: L{NodeImage}
@keyword size: the plan size to create (mandatory)
Requires size.disk (GB) to be set manually
@type size: L{NodeSize}
@keyword location: which datacenter to create the node in
@type location: L{NodeLocation}
@keyword ex_privateip: Backend IP address to assign to node;
must be chosen from the customer's
private VLAN assignment.
@type ex_privateip: C{str}
@keyword ex_publicip: Public-facing IP address to assign to node;
must be chosen from the customer's
public VLAN assignment.
@type ex_publicip: C{str}
@keyword ex_rootpass: Password for root access; generated if unset.
@type ex_rootpass: C{str}
@keyword ex_consolepass: Password for remote console;
generated if unset.
@type ex_consolepass: C{str}
@keyword ex_sshuser: Username for SSH access
@type ex_sshuser: C{str}
@keyword ex_sshpass: Password for SSH access; generated if unset.
@type ex_sshpass: C{str}
@keyword ex_voxel_access: Allow access Voxel administrative access.
Defaults to False.
@type ex_voxel_access: C{bool}
"""
# assert that disk > 0
if not kwargs["size"].disk:
raise ValueError("size.disk must be non-zero")
# convert voxel_access to string boolean if needed
voxel_access = kwargs.get("ex_voxel_access", None)
if voxel_access is not None:
voxel_access = "true" if voxel_access else "false"
params = {
'method': 'voxel.voxcloud.create',
'hostname': kwargs["name"],
'disk_size': int(kwargs["size"].disk),
'facility': kwargs["location"].id,
'image_id': kwargs["image"].id,
'processing_cores': kwargs["size"].ram / RAM_PER_CPU,
'backend_ip': kwargs.get("ex_privateip", None),
'frontend_ip': kwargs.get("ex_publicip", None),
'admin_password': kwargs.get("ex_rootpass", None),
'console_password': kwargs.get("ex_consolepass", None),
'ssh_username': kwargs.get("ex_sshuser", None),
'ssh_password': kwargs.get("ex_sshpass", None),
'voxel_access': voxel_access,
}
object = self.connection.request('/', params=params).object
if self._getstatus(object):
            return Node(
                id=object.findtext("device/id"),
                name=kwargs["name"],
                state=NODE_STATE_MAP[object.findtext("device/status")],
                public_ips=kwargs.get("ex_publicip", None),
                private_ips=kwargs.get("ex_privateip", None),
                driver=self.connection.driver
            )
else:
return None
def reboot_node(self, node):
"""
Reboot the node by passing in the node object
"""
params = {'method': 'voxel.devices.power',
'device_id': node.id,
'power_action': 'reboot'}
return self._getstatus(self.connection.request('/', params=params).object)
def destroy_node(self, node):
"""
Destroy node by passing in the node object
"""
params = {'method': 'voxel.voxcloud.delete',
'device_id': node.id}
return self._getstatus(self.connection.request('/', params=params).object)
def list_locations(self):
params = {"method": "voxel.voxcloud.facilities.list"}
result = self.connection.request('/', params=params).object
nodes = self._to_locations(result)
return nodes
def _getstatus(self, element):
status = element.attrib["stat"]
return status == "ok"
def _to_locations(self, object):
return [NodeLocation(element.attrib["label"],
element.findtext("description"),
element.findtext("description"),
self)
for element in object.findall('facilities/facility')]
def _to_nodes(self, object):
nodes = []
for element in object.findall('devices/device'):
if element.findtext("type") == "Virtual Server":
try:
                    state = NODE_STATE_MAP[element.attrib['status']]
except KeyError:
state = NodeState.UNKNOWN
public_ip = private_ip = None
ipassignments = element.findall("ipassignments/ipassignment")
for ip in ipassignments:
if ip.attrib["type"] =="frontend":
public_ip = ip.text
elif ip.attrib["type"] == "backend":
private_ip = ip.text
                nodes.append(Node(id=element.attrib['id'],
                                  name=element.attrib['label'],
                                  state=state,
                                  public_ips=public_ip,
                                  private_ips=private_ip,
                                  driver=self.connection.driver))
return nodes
def _to_images(self, object):
images = []
for element in object.findall("images/image"):
            images.append(NodeImage(id=element.attrib["id"],
                                    name=element.attrib["summary"],
                                    driver=self.connection.driver))
return images
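if __name__ == '__main__':
    # Usage sketch added for illustration; the credentials and the host name
    # below are placeholders, not real values.  list_sizes() is served from the
    # locally built VOXEL_INSTANCE_TYPES table, while the commented-out calls
    # would issue real API requests and therefore need valid credentials.
    driver = VoxelNodeDriver('my-api-key', 'my-api-secret')
    sizes = driver.list_sizes()
    size = sizes[0]
    size.disk = 20  # create_node() requires size.disk to be set explicitly
    print([s.id for s in sizes])
    # images = driver.list_images()
    # locations = driver.list_locations()
    # node = driver.create_node(name='example-host', image=images[0],
    #                           size=size, location=locations[0])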
|
apache-2.0
| 557,125,822,919,852,400
| 35.319218
| 82
| 0.553184
| false
| 4.28023
| false
| false
| false
|
Entscheider/SeamEater
|
ImgLib/Poisson.py
|
1
|
4047
|
# -*- coding: utf-8 -*-
# Functions for Poisson-Reconstruction
import numpy as np
from ImgLib.MyFilter import myfilter as filter
# Some explanations: http://eric-yuan.me/poisson-blending/
def jacobi(A, b, N=25, x=None, progressFunc = None, stopFunc=None):
"""
Solving A*x =b for x by using the Jacobi-method.
@param A The Matrix
@param b The solution A*x=b
@param N the iterations for solving.
@param x A guess value for beginning.
@param progressFunc A function for showing the progress.
@param stopFunc Function. Stopping when evaluated to true
@return The solution x
"""
# Create an initial guess if needed
if x is None:
x = np.zeros(len(A[0]))
# Create a vector of the diagonal elements of A
# and subtract them from A
D = np.diag(A)
R = A - np.diagflat(D)
# Iterate for N times
for i in range(N):
if (progressFunc):
progressFunc(i*100/N)
if stopFunc and stopFunc():
return x
x = (b - np.dot(R, x)) / D
return x
def laplace_div(array):
'''
Calculating the Laplace derivative
@param array The Image
@return The numpy array of the Laplace derivative
'''
kern=-np.array([[0,1,0],[1,-4,1],[0,1,0]])
return filter(array,kern)
# Inspired by http://pebbie.wordpress.com/2012/04/04/python-poisson-image-editing/
def poissonInsertMask(m, mask, div, iterations=20, progressFunc = None, stopFunc=None):
'''
Computes from the Laplace derivative div and the picture m
a new picture. That picture blends them together using Poisson.
@param m The target picture
@param mask mask[x,y]=1 => Reconstruct this pixel.
mask[x,y]=0 => Use the value from m for this pixel
0<mask[x,y]<1 => Mix both picture
@param div The Laplace derivative for reconstruction. (numpy Array)
@param iterations Number of iteration for solving the linear system of equations.
iterations <=0 => Use the exact solution
@param progressFunc A function for showing the progress.
@param stopFunc Function. Stopping when evaluated to true
@return the reconstructed picture.
'''
h, w = mask.shape
r, c = mask.nonzero()
N = len(r)
idx = np.zeros(mask.shape, dtype=np.uint32)
for i in range(N):
idx.itemset((r.item(i), c.item(i)), i + 1)
b_r = np.zeros(N)
A = np.zeros((N, N))
for i in range(N):
if (progressFunc):
progressFunc(i*100//(2*N))
if stopFunc and stopFunc():
return
y, x = r.item(i), c.item(i)
b_r.itemset(i, div.item((y, x)))
p = i
Np = 0
if y > 0 and mask.item((y - 1, x)):
q = idx.item((y - 1, x)) - 1
A[p, q] = -1.
Np += 1
if x > 0 and mask.item((y, x - 1)):
q = idx.item((y, x - 1)) - 1
A[p, q] = -1.
Np += 1
if y < h - 1 and mask.item((y + 1, x)):
q = idx.item((y + 1, x)) - 1
A[p, q] = -1.
Np += 1
if x < w - 1 and mask.item((y, x + 1)):
q = idx.item((y, x + 1)) - 1
A[p, q] = -1
Np += 1
A[p, p] = Np * 1.
guess = None
x = 0
if (iterations <= 0):
x = np.linalg.solve(A,b_r).astype("uint8")
else:
if (progressFunc):
x = jacobi(A, b_r, x=guess, N=iterations, progressFunc = lambda k:progressFunc(50+k/2), stopFunc = stopFunc)
else:
x = jacobi(A, b_r, x=guess, N=iterations, stopFunc = stopFunc)
if stopFunc and stopFunc():
return None
for i in range(N):
yy, xx = r.item(i), c.item(i)
v = m[yy, xx] - x[i]
if v < 0:
v = 0
elif v > 255:
v = 255
if (iterations >0): # mixing
m[yy, xx] = v * mask[yy, xx] + m[yy, xx] * (1 - mask[yy, xx])
else: # no mixing needed ?!
m[yy, xx] = v
return m
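if __name__ == '__main__':
    # Small self-check added for illustration (not part of the original module):
    # solve a tiny diagonally dominant system with jacobi() and compare the
    # result against numpy's direct solver.  The matrix and right-hand side are
    # arbitrary example values.
    A = np.array([[4.0, 1.0, 0.0],
                  [1.0, 5.0, 2.0],
                  [0.0, 2.0, 6.0]])
    b = np.array([1.0, 2.0, 3.0])
    print(jacobi(A, b, N=50))
    print(np.linalg.solve(A, b))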
|
gpl-3.0
| -1,879,845,899,330,050,600
| 32.172131
| 123
| 0.538424
| false
| 3.258454
| false
| false
| false
|
alhashash/odoo
|
addons/hr_timesheet_invoice/hr_timesheet_invoice.py
|
2
|
18970
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class hr_timesheet_invoice_factor(osv.osv):
_name = "hr_timesheet_invoice.factor"
_description = "Invoice Rate"
_order = 'factor'
_columns = {
'name': fields.char('Internal Name', required=True, translate=True),
'customer_name': fields.char('Name', help="Label for the customer"),
'factor': fields.float('Discount (%)', required=True, help="Discount in percentage"),
}
_defaults = {
'factor': lambda *a: 0.0,
}
class account_analytic_account(osv.osv):
def _invoiced_calc(self, cr, uid, ids, name, arg, context=None):
obj_invoice = self.pool.get('account.invoice')
res = {}
cr.execute('SELECT account_id as account_id, l.invoice_id '
'FROM hr_analytic_timesheet h LEFT JOIN account_analytic_line l '
'ON (h.line_id=l.id) '
'WHERE l.account_id = ANY(%s)', (ids,))
account_to_invoice_map = {}
for rec in cr.dictfetchall():
account_to_invoice_map.setdefault(rec['account_id'], []).append(rec['invoice_id'])
for account in self.browse(cr, uid, ids, context=context):
invoice_ids = filter(None, list(set(account_to_invoice_map.get(account.id, []))))
for invoice in obj_invoice.browse(cr, uid, invoice_ids, context=context):
res.setdefault(account.id, 0.0)
res[account.id] += invoice.amount_untaxed
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
_inherit = "account.analytic.account"
_columns = {
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist',
help="The product to invoice is defined on the employee form, the price will be deducted by this pricelist on the product."),
'amount_max': fields.float('Max. Invoice Price',
help="Keep empty if this contract is not limited to a total fixed price."),
'amount_invoiced': fields.function(_invoiced_calc, string='Invoiced Amount',
help="Total invoiced"),
'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Timesheet Invoicing Ratio',
help="You usually invoice 100% of the timesheets. But if you mix fixed price and timesheet invoicing, you may use another ratio. For instance, if you do a 20% advance invoice (fixed price, based on a sales order), you should invoice the rest on timesheet with a 80% ratio."),
}
_defaults = {
'pricelist_id': lambda self, cr, uid, c: self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'product.list0')
}
def on_change_partner_id(self, cr, uid, ids, partner_id, name, context=None):
res = super(account_analytic_account, self).on_change_partner_id(cr, uid, ids, partner_id, name, context=context)
if partner_id:
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False
if pricelist:
res['value']['pricelist_id'] = pricelist
return res
def set_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def set_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)
def set_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def set_pending(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'pending'}, context=context)
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_columns = {
'invoice_id': fields.many2one('account.invoice', 'Invoice', ondelete="set null", copy=False),
'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Invoiceable', help="It allows to set the discount while making invoice, keep empty if the activities should not be invoiced."),
}
def _default_journal(self, cr, uid, context=None):
proxy = self.pool.get('hr.employee')
record_ids = proxy.search(cr, uid, [('user_id', '=', uid)], context=context)
if record_ids:
employee = proxy.browse(cr, uid, record_ids[0], context=context)
return employee.journal_id and employee.journal_id.id or False
return False
def _default_general_account(self, cr, uid, context=None):
proxy = self.pool.get('hr.employee')
record_ids = proxy.search(cr, uid, [('user_id', '=', uid)], context=context)
if record_ids:
employee = proxy.browse(cr, uid, record_ids[0], context=context)
if employee.product_id and employee.product_id.property_account_income:
return employee.product_id.property_account_income.id
return False
_defaults = {
'journal_id' : _default_journal,
'general_account_id' : _default_general_account,
}
def write(self, cr, uid, ids, vals, context=None):
self._check_inv(cr, uid, ids, vals)
return super(account_analytic_line,self).write(cr, uid, ids, vals,
context=context)
def _check_inv(self, cr, uid, ids, vals):
select = ids
if isinstance(select, (int, long)):
select = [ids]
        if 'invoice_id' not in vals or vals['invoice_id'] == False:
for line in self.browse(cr, uid, select):
if line.invoice_id:
raise UserError(_('You cannot modify an invoiced analytic line!'))
return True
def _get_invoice_price(self, cr, uid, account, product_id, user_id, qty, context = {}):
pro_price_obj = self.pool.get('product.pricelist')
if account.pricelist_id:
pl = account.pricelist_id.id
price = pro_price_obj.price_get(cr,uid,[pl], product_id, qty or 1.0, account.partner_id.id, context=context)[pl]
else:
price = 0.0
return price
def _prepare_cost_invoice(self, cr, uid, partner, company_id, currency_id, analytic_lines, group_by_partner=False, context=None):
""" returns values used to create main invoice from analytic lines"""
account_payment_term_obj = self.pool['account.payment.term']
if group_by_partner:
invoice_name = partner.name
else:
invoice_name = analytic_lines[0].account_id.name
date_due = False
if partner.property_payment_term:
pterm_list = account_payment_term_obj.compute(cr, uid,
partner.property_payment_term.id, value=1,
date_ref=time.strftime('%Y-%m-%d'))
if pterm_list:
pterm_list = [line[0] for line in pterm_list]
pterm_list.sort()
date_due = pterm_list[-1]
return {
'name': "%s - %s" % (time.strftime('%d/%m/%Y'), invoice_name),
'partner_id': partner.id,
'company_id': company_id,
'payment_term': partner.property_payment_term.id or False,
'account_id': partner.property_account_receivable.id,
'currency_id': currency_id,
'date_due': date_due,
'fiscal_position': partner.property_account_position.id
}
def _prepare_cost_invoice_line(self, cr, uid, invoice_id, product_id, uom, user_id,
factor_id, account, analytic_lines, journal_type, data, context=None):
product_obj = self.pool['product.product']
uom_context = dict(context or {}, uom=uom)
total_price = sum(l.amount for l in analytic_lines)
total_qty = sum(l.unit_amount for l in analytic_lines)
if data.get('product'):
# force product, use its public price
if isinstance(data['product'], (tuple, list)):
product_id = data['product'][0]
else:
product_id = data['product']
unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, total_qty, uom_context)
elif journal_type == 'general' and product_id:
# timesheets, use sale price
unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, total_qty, uom_context)
else:
# expenses, using price from amount field
unit_price = total_price*-1.0 / total_qty
factor = self.pool['hr_timesheet_invoice.factor'].browse(cr, uid, factor_id, context=uom_context)
factor_name = factor.customer_name
curr_invoice_line = {
'price_unit': unit_price,
'quantity': total_qty,
'product_id': product_id,
'discount': factor.factor,
'invoice_id': invoice_id,
'name': factor_name,
'uos_id': uom,
'account_analytic_id': account.id,
}
if product_id:
product = product_obj.browse(cr, uid, product_id, context=uom_context)
factor_name = product_obj.name_get(cr, uid, [product_id], context=uom_context)[0][1]
if factor.customer_name:
factor_name += ' - ' + factor.customer_name
general_account = product.property_account_income or product.categ_id.property_account_income_categ
if not general_account:
raise UserError(_("Configuration Error!") + '\n' + _("Please define income account for product '%s'.") % product.name)
taxes = product.taxes_id or general_account.tax_ids
tax = self.pool['account.fiscal.position'].map_tax(cr, uid, account.partner_id.property_account_position, taxes)
            curr_invoice_line.update({
                'invoice_line_tax_id': [(6, 0, tax)],
                'name': factor_name,
                'account_id': general_account.id,
            })
note = []
for line in analytic_lines:
# set invoice_line_note
details = []
if data.get('date', False):
details.append(line['date'])
if data.get('time', False):
if line['product_uom_id']:
details.append("%s %s" % (line.unit_amount, line.product_uom_id.name))
else:
details.append("%s" % (line['unit_amount'], ))
if data.get('name', False):
details.append(line['name'])
if details:
note.append(u' - '.join(map(lambda x: unicode(x) or '', details)))
if note:
curr_invoice_line['name'] += "\n" + ("\n".join(map(lambda x: unicode(x) or '', note)))
return curr_invoice_line
def invoice_cost_create(self, cr, uid, ids, data=None, context=None):
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
invoices = []
if context is None:
context = {}
if data is None:
data = {}
# use key (partner/account, company, currency)
# creates one invoice per key
invoice_grouping = {}
# grouping on partner instead of analytic account
group_by_partner = data.get('group_by_partner', False)
currency_id = False
# prepare for iteration on journal and accounts
for line in self.browse(cr, uid, ids, context=context):
# check if currency is the same in different accounts when grouping by partner
if not currency_id :
currency_id = line.account_id.pricelist_id.currency_id.id
if line.account_id.pricelist_id and line.account_id.pricelist_id.currency_id:
if line.account_id.pricelist_id.currency_id.id != currency_id and group_by_partner:
raise UserError(_('You cannot group invoices having different currencies on different analytic accounts for the same partner.'))
if group_by_partner:
key = (line.account_id.partner_id.id,
line.account_id.company_id.id,
line.account_id.pricelist_id.currency_id.id)
invoice_grouping.setdefault(key, []).append(line)
else:
key = (line.account_id.id,
line.account_id.company_id.id,
line.account_id.pricelist_id.currency_id.id)
invoice_grouping.setdefault(key, []).append(line)
for (key_id, company_id, currency_id), analytic_lines in invoice_grouping.items():
# key_id is either an account.analytic.account, either a res.partner
# don't really care, what's important is the analytic lines that
# will be used to create the invoice lines
partner = analytic_lines[0].account_id.partner_id # will be the same for every line
curr_invoice = self._prepare_cost_invoice(cr, uid, partner, company_id, currency_id, analytic_lines, group_by_partner, context=context)
invoice_context = dict(context,
lang=partner.lang,
force_company=company_id, # set force_company in context so the correct product properties are selected (eg. income account)
company_id=company_id) # set company_id in context, so the correct default journal will be selected
last_invoice = invoice_obj.create(cr, uid, curr_invoice, context=invoice_context)
invoices.append(last_invoice)
# use key (product, uom, user, invoiceable, analytic account, journal type)
# creates one invoice line per key
invoice_lines_grouping = {}
for analytic_line in analytic_lines:
account = analytic_line.account_id
if (not partner) or not (account.pricelist_id):
raise UserError(_('Contract incomplete. Please fill in the Customer and Pricelist fields for %s.') % (account.name))
if not analytic_line.to_invoice:
raise UserError(_('Trying to invoice non invoiceable line for %s.') % (analytic_line.product_id.name))
key = (analytic_line.product_id.id,
analytic_line.product_uom_id.id,
analytic_line.user_id.id,
analytic_line.to_invoice.id,
analytic_line.account_id,
analytic_line.journal_id.type)
invoice_lines_grouping.setdefault(key, []).append(analytic_line)
# finally creates the invoice line
for (product_id, uom, user_id, factor_id, account, journal_type), lines_to_invoice in invoice_lines_grouping.items():
curr_invoice_line = self._prepare_cost_invoice_line(cr, uid, last_invoice,
product_id, uom, user_id, factor_id, account, lines_to_invoice,
journal_type, data, context=context)
invoice_line_obj.create(cr, uid, curr_invoice_line, context=context)
self.write(cr, uid, [l.id for l in analytic_lines], {'invoice_id': last_invoice}, context=context)
invoice_obj.button_reset_taxes(cr, uid, [last_invoice], context)
return invoices
class hr_analytic_timesheet(osv.osv):
_inherit = "hr.analytic.timesheet"
def on_change_account_id(self, cr, uid, ids, account_id, user_id=False):
res = {}
if not account_id:
return res
res.setdefault('value',{})
acc = self.pool.get('account.analytic.account').browse(cr, uid, account_id)
st = acc.to_invoice.id
res['value']['to_invoice'] = st or False
if acc.state=='pending':
res['warning'] = {
'title': 'Warning',
'message': 'The analytic account is in pending state.\nYou should not work on this account !'
}
return res
class account_invoice(osv.osv):
_inherit = "account.invoice"
def _get_analytic_lines(self, cr, uid, ids, context=None):
iml = super(account_invoice, self)._get_analytic_lines(cr, uid, ids, context=context)
inv = self.browse(cr, uid, ids, context=context)[0]
if inv.type == 'in_invoice':
obj_analytic_account = self.pool.get('account.analytic.account')
for il in iml:
if il['account_analytic_id']:
# *-* browse (or refactor to avoid read inside the loop)
to_invoice = obj_analytic_account.read(cr, uid, [il['account_analytic_id']], ['to_invoice'], context=context)[0]['to_invoice']
if to_invoice:
il['analytic_lines'][0][2]['to_invoice'] = to_invoice[0]
return iml
class account_move_line(osv.osv):
_inherit = "account.move.line"
def create_analytic_lines(self, cr, uid, ids, context=None):
res = super(account_move_line, self).create_analytic_lines(cr, uid, ids,context=context)
analytic_line_obj = self.pool.get('account.analytic.line')
for move_line in self.browse(cr, uid, ids, context=context):
#For customer invoice, link analytic line to the invoice so it is not proposed for invoicing in Bill Tasks Work
invoice_id = move_line.invoice and move_line.invoice.type in ('out_invoice','out_refund') and move_line.invoice.id or False
for line in move_line.analytic_lines:
analytic_line_obj.write(cr, uid, line.id, {
'invoice_id': invoice_id,
'to_invoice': line.account_id.to_invoice and line.account_id.to_invoice.id or False
}, context=context)
return res
|
agpl-3.0
| -3,447,457,845,333,173,000
| 47.516624
| 287
| 0.59009
| false
| 3.943047
| false
| false
| false
|
Diksha-Rathi/find-my-place
|
find-my-place/settings.py
|
1
|
2706
|
"""
Django settings for find-my-place project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '**************************************************'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'find-my-place.urls'
WSGI_APPLICATION = 'find-my-place.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'GMT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Parse database configuration from $DATABASE_URL
DATABASES['default'] = dj_database_url.config()
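# Example of a value dj_database_url can parse from the DATABASE_URL
# environment variable (credentials and database name here are placeholders):
#   postgres://username:password@localhost:5432/find_my_place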
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR,'../static'),)
TEMPLATE_DIRS = ( os.path.join(BASE_DIR, '../templates'),)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'username@domain.com'
SERVER_EMAIL = 'username@domain.com'
EMAIL_HOST = 'smtp.domain.com'
EMAIL_HOST_USER = 'username@domain.com'
EMAIL_HOST_PASSWORD = '********'
EMAIL_PORT = 587
|
mit
| 4,452,227,457,241,505,000
| 23.834862
| 71
| 0.705839
| false
| 3.35316
| false
| false
| false
|
smerkousdavid/rem-sphinx
|
logger.py
|
1
|
2066
|
# -*- coding: utf-8 -*-
"""RemSphinx speech to text logger
This module is designed to just handle logging. There's nothing more to it
Just printing and logging to files
Developed By: David Smerkous
"""
from logging import getLogger, INFO, Formatter, FileHandler, StreamHandler
from os.path import dirname, realpath, isdir, exists
from os import makedirs
from time import strftime
from sys import stdout
# Define logging characteristics
LOGGER_NAME = "RemSphinx"
LOGGER_LEVEL = INFO
LOGGER_FORMAT = Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
LOGGER_FILE_PATH = "%s/logs" % dirname(realpath(__file__))
LOGGER_FILE_DATE = strftime("%d-%m-%y--%H-%M-%S")
LOGGER_FILE_FORMAT = "%s/%s.log" % (LOGGER_FILE_PATH, LOGGER_FILE_DATE)
if not isdir(LOGGER_FILE_PATH):
print("Creating new log location %s..." % LOGGER_FILE_PATH),
makedirs(LOGGER_FILE_PATH)
print("Done")
if not exists(LOGGER_FILE_FORMAT):
print("Creating new log file %s..." % LOGGER_FILE_FORMAT),
open(LOGGER_FILE_FORMAT, 'w').close()
print("Done")
LOGGER_FILE_HANDLER = FileHandler(LOGGER_FILE_FORMAT)
LOGGER_FILE_HANDLER.setFormatter(LOGGER_FORMAT)
LOGGER_CONSOLE_HANDLER = StreamHandler(stdout)
LOGGER_CONSOLE_HANDLER.setFormatter(LOGGER_FORMAT)
LOGGER = getLogger(LOGGER_NAME)
LOGGER.addHandler(LOGGER_FILE_HANDLER)
# Uncomment when not using tornado, which already has a console handler
# LOGGER.addHandler(LOGGER_CONSOLE_HANDLER)
class logger(object):
def __init__(self, name_space, logger_level=LOGGER_LEVEL):
LOGGER.setLevel(logger_level)
LOGGER.debug("Starting logger!")
self._name_space = name_space
def __base_log(self, to_log):
return "|%s|: %s" % (self._name_space, str(to_log))
def info(self, to_log):
LOGGER.info(self.__base_log(to_log))
def debug(self, to_log):
LOGGER.debug(self.__base_log(to_log))
def warning(self, to_log):
LOGGER.warning(self.__base_log(to_log))
def error(self, to_log):
LOGGER.error(self.__base_log(to_log))
|
gpl-3.0
| -8,818,605,115,806,239,000
| 30.784615
| 96
| 0.693611
| false
| 3.343042
| false
| false
| false
|
cabanm/project-euler
|
myMath.py
|
1
|
3540
|
from time import time
from math import sqrt
# Time some code
def timeIt(code):
start = time()
exec code
return time()-start
# Find primes up to a certain number and output a dictionary with them as keys
def primes(top):
sieve = [0]*top
for m in range(2, top+1):
if sieve[m-1] == 0: # if m prime
for n in range(m, top//m+1):
p = m*n
sieve[p-1] = 1
primes = {}
for n in range(2,top+1):
if sieve[n-1] == 0: primes[n] = 0
return primes
# Find Pythagorean triplets with short sides up to and equal to max side
def pythTrips(maxSide):
triples = []
for a in range(1, maxSide+1):
for b in range(1, a):
c = sqrt(a**2+b**2)
if c == int(c): triples.append((a,b,int(c)))
return triples
# Find Pythagorean triplets with max perimeter specified
def pythTripsPerim(p):
triples = []
for a in range(1, p):
for b in range(1, a):
c = sqrt(a**2+b**2)
if c == int(c) and a+b+c <= p: triples.append((a,b,int(c)))
return triples
# Checks if the input string is a pandigital number
def isPandigital(n):
if n.count('0') != 0: return 0
for digit in range(1,10):
if n.count(str(digit)) > 1:
return 0
return 1
# Checks if input number is prime
def isPrime(n):
n = abs(n)
if n==0 or n==1: return 0
#print 'Checking primality:', n
maxm = int(sqrt(n))+1
for d in range(2, maxm):
#if (d*100//maxm)%10 == 0: print d/1.0/maxm
if n%d == 0: return 0
return 1
# Returns the prime factors of a number given a set of primes
def pFactors(n,primes):
i = 0
divs = []
while n != 1:
p = primes[i]
if n%p == 0:
divs.append(p)
n = n/p
i = 0
else:
i += 1
return divs
# Returns the number of unique prime factors for numbers up to and incl. top
def pFactors2(top):
sieve = [0]*top
sieve[0] = 1
for m in range(2, top+1):
if sieve[m-1] == 0: # if m is prime
for n in range(2, top//m+1):
p = m*n
sieve[p-1] += 1
return sieve
# Checks if a number is pentagonal
def isPent(n):
d = sqrt(1.+24*n)
if d%1 == 0 and (d+1)%6 == 0: return 1
return 0
# Returns a list of the amount of each digit a number has
# Note: a method with purely mathematical operations took longer than using strings!!!!
def digSig(n):
sig = [0]*10
for d in str(n):
sig[int(d)] += 1
return sig
# Returns the set of digits in a number
def digits(n):
return set([int(ch) for ch in str(n)])
# Returns the number of digits in a number
def digNum(n):
return len(str(n))
# Returns factorial of number
def factorial(n):
out=1
for x in range(1,abs(n)+1):
out = out*x
return out
# The combinatoric formula, that will work well for large n and reasonable r
def nCr(n,r):
if n<r:
return "n must be leq r"
out=1
for x in range(n-r+1,n+1):
out = out*x
return out/factorial(r)
# Returns all possible combinations of a list
def combinations(s): # Rename to subsets!!!!!
yield []
for i, d in enumerate(s):
for comb in combinations(s[i+1:]):
yield [d] + comb
# Returns whether a number is a palindrome
def isPalindromic(n):
n=str(n)
if n==''.join([n[-i-1] for i in range(len(n))]): return 1
return 0
# Returns the reverse of an integer
def reverse(n):
n=str(n)
return int(''.join([n[-i-1] for i in range(len(n))]))
# Returns the digital sum of a number
def digSum(n):
total = 0
for m,n in enumerate(digSig(n)):
total += m*n
return total
# Returns whether a number is square
def isSquare(n):
# Perfect squares end in 0, 1, 4, 9 in hexadecimal
# Thus we check this first, then apply general method
if hex(n)[-1] in ['0','1','4','9']:
if int(n**0.5)**2 == n: return 1
return 0
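if __name__ == '__main__':
    # Quick demonstration added for illustration; the values are small and easy
    # to verify by hand.
    print(sorted(primes(30).keys()))  # primes up to 30
    print(isPrime(97))                # 1, since 97 is prime
    print(pythTripsPerim(30))         # (4, 3, 5), (8, 6, 10), (12, 5, 13)
    print(nCr(10, 3))                 # 120
    print(isPalindromic(12321))       # 1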
|
gpl-2.0
| 9,074,495,308,821,205,000
| 21.987013
| 87
| 0.641243
| false
| 2.537634
| false
| false
| false
|
kiliakis/BLonD
|
beams/beams.py
|
1
|
5676
|
# Copyright 2015 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Module containing the fundamental beam class with methods to compute beam statistics**
:Authors: **Danilo Quartullo**, **Helga Timko**, **ALexandre Lasheen**
'''
from __future__ import division
import numpy as np
from trackers.utilities import is_in_separatrix
class Beam(object):
'''
*Object containing the beam coordinates and beam properties such as mass,
charge, synchronous energy, momentum, etc.
The beam coordinate 'dt' is defined as the particle arrival time to the RF
station w.r.t. the reference time that is the sum of turns.
The beam coordiate 'dE' is defined as the particle energy offset w.r.t. the
energy of the synchronous particle.*
'''
def __init__(self, GeneralParameters, n_macroparticles, intensity):
#: *Import particle mass [eV] (from GeneralParameters)*
self.mass = GeneralParameters.mass
#: *Import particle charge [e] (from GeneralParameters)*
self.charge = GeneralParameters.charge
#: *Import synchronous relativistic beta [1] (from GeneralParameters)*
self.beta = GeneralParameters.beta[0][0]
#: *Import synchronous relativistic gamma [1] (from GeneralParameters)*
self.gamma = GeneralParameters.gamma[0][0]
#: *Import synchronous total energy [eV] (from GeneralParameters)*
self.energy = GeneralParameters.energy[0][0]
#: *Import synchronous momentum [eV] (from GeneralParameters)*
self.momentum = GeneralParameters.momentum[0][0]
#: *Import ring radius [m] (from GeneralParameters)*
#self.ring_radius = GeneralParameters.ring_radius
#: | *Beam arrival time with respect to reference time [s]*
self.dt = np.zeros([n_macroparticles])
#: | *Beam energy offset with respect to synchronous energy [eV]*
self.dE = np.zeros([n_macroparticles])
#: | *Average beam arrival time [s]*
self.mean_dt = 0
#: | *Average beam energy offset [eV]*
self.mean_dE = 0
#: | *Standard deviation of beam arrival time [s]*
self.sigma_dt = 0
#: | *Standard deviation of beam energy offset [eV]*
self.sigma_dE = 0
#: | *Total beam intensity [1]*
self.intensity = intensity
#: | *Total number of macro-particles in the beam [1]*
self.n_macroparticles = int(n_macroparticles)
#: | *This ratio should be in general constant during the simulation*
self.ratio = self.intensity/self.n_macroparticles
#: | *Number of macro-particles marked as 'lost' [1]*
#: | *Losses defined via loss mechanisms chosen by user*
self.n_macroparticles_lost = 0
#: | *Number of transmitted macro-particles (= total - lost) [1]*
#self.n_macroparticles_alive = self.n_macroparticles - self.n_macroparticles_lost
#: | *Unique macro-particle ID number; zero if particle is 'lost'*
self.id = np.arange(1, self.n_macroparticles + 1, dtype=int)
@property
def n_macroparticles_alive(self):
'''
*Number of transmitted macro-particles.*
'''
return self.n_macroparticles - self.n_macroparticles_lost
def statistics(self):
'''
*Calculation of the mean and standard deviation of beam coordinates,
as well as beam emittance using different definitions.*
'''
# Statistics only for particles that are not flagged as lost
itemindex = np.where(self.id != 0)[0]
self.mean_dt = np.mean(self.dt[itemindex])
self.mean_dE = np.mean(self.dE[itemindex])
self.sigma_dt = np.std(self.dt[itemindex])
self.sigma_dE = np.std(self.dE[itemindex])
# R.m.s. emittance in Gaussian approximation
self.epsn_rms_l = np.pi*self.sigma_dE*self.sigma_dt # in eVs
# Losses
        self.n_macroparticles_lost = len(np.where(self.id == 0)[0])
def losses_separatrix(self, GeneralParameters, RFSectionParameters, Beam):
'''
*Beam losses based on separatrix.*
'''
itemindex = np.where(is_in_separatrix(GeneralParameters,
RFSectionParameters,
Beam, self.dt, self.dE)
== False)[0]
if itemindex.size != 0:
self.id[itemindex] = 0
def losses_longitudinal_cut(self, dt_min, dt_max):
'''
*Beam losses based on longitudinal cuts.*
'''
itemindex = np.where( (self.dt - dt_min)*(dt_max - self.dt) < 0 )[0]
if itemindex.size != 0:
self.id[itemindex] = 0
def losses_energy_cut(self, dE_min, dE_max):
'''
*Beam losses based on energy cuts, e.g. on collimators.*
'''
itemindex = np.where( (self.dE - dE_min)*(dE_max - self.dE) < 0 )[0]
if itemindex.size != 0:
self.id[itemindex] = 0
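if __name__ == '__main__':
    # Framework-free sketch added for illustration; it does not use the BLonD
    # classes above.  It shows the same masking-by-id idea that statistics()
    # relies on: particles flagged with id == 0 are excluded from the averages.
    dt = np.random.normal(0.0, 1.0e-9, 1000)   # toy arrival times [s]
    dE = np.random.normal(0.0, 1.0e6, 1000)    # toy energy offsets [eV]
    ids = np.arange(1, 1001)
    ids[::10] = 0                              # flag every tenth particle as lost
    alive = np.where(ids != 0)[0]
    print(np.mean(dt[alive]), np.std(dt[alive]))
    print(np.mean(dE[alive]), np.std(dE[alive]))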
|
gpl-3.0
| -2,657,791,008,127,920,000
| 35.152866
| 90
| 0.586328
| false
| 4.011307
| false
| false
| false
|
GhostshipSoftware/avaloria
|
game/gamesrc/chargen.py
|
1
|
12872
|
"""
Contribution - Griatch 2011
[Note - with the advent of MULTISESSION_MODE=2, this is not really
as necessary anymore - the ooclook and @charcreate commands in that
mode replaces this module with better functionality.]
This is a simple character creation commandset. A suggestion is to
test this together with menu_login, which doesn't create a Character
on its own. This shows some more info and gives the Player the option
to create a character without any more customizations than their name
(further options are unique for each game anyway).
Since this extends the OOC cmdset, logging in from the menu will
automatically drop the Player into this cmdset unless they logged off
while puppeting a Character already before.
Installation:
Read the instructions in game/gamesrc/commands/examples/cmdset.py in
order to create a new default cmdset module for Evennia to use (copy
the template up one level, and change the settings file's relevant
variables to point to the cmdsets inside). If you already have such
a module you should of course use that.
Next import this module in your custom cmdset module and add the
following line to the end of OOCCmdSet's at_cmdset_creation():
self.add(chargen.OOCCmdSetCharGen)
"""
from django.conf import settings
from ev import Command, create_object, utils, CmdSet
from ev import default_cmds, managers
from game.gamesrc.menu_login import *
from game.gamesrc.objects import copyreader
CHARACTER_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
class CmdOOCLook(default_cmds.CmdLook):
"""
ooc look
Usage:
look
look <character>
This is an OOC version of the look command. Since a Player doesn't
have an in-game existence, there is no concept of location or
"self".
If any characters are available for you to control, you may look
at them with this command.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
help_cateogory = "General"
def func(self):
"""
Implements the ooc look command
We use an attribute _character_dbrefs on the player in order
to figure out which characters are "theirs". A drawback of this
is that only the CmdCharacterCreate command adds this attribute,
and thus e.g. player #1 will not be listed (although it will work).
Existence in this list does not depend on puppeting rights though,
that is checked by the @ic command directly.
"""
# making sure caller is really a player
self.character = None
if utils.inherits_from(self.caller, "src.objects.objects.Object"):
# An object of some type is calling. Convert to player.
#print self.caller, self.caller.__class__
self.character = self.caller
if hasattr(self.caller, "player"):
self.caller = self.caller.player
if not self.character:
# ooc mode, we are players
avail_chars = self.caller.db._character_dbrefs
if self.args:
# Maybe the caller wants to look at a character
if not avail_chars:
self.caller.msg("You have no characters to look at. Why not create one?")
return
objs = managers.objects.get_objs_with_key_and_typeclass(self.args.strip(), CHARACTER_TYPECLASS)
objs = [obj for obj in objs if obj.id in avail_chars]
if not objs:
self.caller.msg("You cannot see this Character.")
return
self.caller.msg(objs[0].return_appearance(self.caller))
return
# not inspecting a character. Show the OOC info.
charobjs = []
charnames = []
if self.caller.db._character_dbrefs:
dbrefs = self.caller.db._character_dbrefs
charobjs = [managers.objects.get_id(dbref) for dbref in dbrefs]
charnames = [charobj.key for charobj in charobjs if charobj]
if charnames:
charlist = "The following Character(s) are available:\n\n"
charlist += "\n\r".join(["{w %s{n" % charname for charname in charnames])
charlist += "\n\n Use {w@ic <character name>{n to switch to that Character."
else:
charlist = "You have no Characters."
string = \
""" You, %s, are an {wOOC ghost{n without form. The world is hidden
from you and besides chatting on channels your options are limited.
You need to have a Character in order to interact with the world.
%s
Use {wcreate <name>{n to create a new character and {whelp{n for a
list of available commands.""" % (self.caller.key, charlist)
self.caller.msg(string)
else:
# not ooc mode - leave back to normal look
# we have to put this back for normal look to work.
self.caller = self.character
super(CmdOOCLook, self).func()
class CmdOOCCharacterCreate(Command):
"""
creates a character
Usage:
create <character name>
This will create a new character, assuming
the given character name does not already exist.
"""
key = "create"
locks = "cmd:all()"
def func(self):
"""
Tries to create the Character object. We also put an
attribute on ourselves to remember it.
"""
# making sure caller is really a player
self.character = None
if utils.inherits_from(self.caller, "src.objects.objects.Object"):
# An object of some type is calling. Convert to player.
#print self.caller, self.caller.__class__
self.character = self.caller
if hasattr(self.caller, "player"):
self.caller = self.caller.player
if not self.args:
self.caller.msg("Usage: create <character name>")
return
charname = self.args.strip()
old_char = managers.objects.get_objs_with_key_and_typeclass(charname, CHARACTER_TYPECLASS)
if old_char:
self.caller.msg("Character {c%s{n already exists." % charname)
return
# create the character
new_character = create_object(CHARACTER_TYPECLASS, key=charname)
if not new_character:
self.caller.msg("{rThe Character couldn't be created. This is a bug. Please contact an admin.")
return
# make sure to lock the character to only be puppeted by this player
new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)" %
(new_character.id, self.caller.id))
# save dbref
avail_chars = self.caller.db._character_dbrefs
if avail_chars:
avail_chars.append(new_character.id)
else:
avail_chars = [new_character.id]
self.caller.db._character_dbrefs = avail_chars
self.caller.msg("{gThe Character {c%s{g was successfully created!" % charname)
self.caller.obj = new_character
attributes = new_character.db.attributes
nodes = []
copy_dir = '/Users/geoffrey/gitrepos/avaloria/game/gamesrc/copy/'
for option in ['race', 'deity', 'alignment', 'gender']:
if 'race' in option:
for race in ['bardok', 'erelania', 'the unknowns', 'earthen', 'gerdling']:
confirm_node = MenuNode("confirm-%s" % race, links=['deity'], linktexts=['Choose your deity.'], code="self.caller.obj.set_race('%s')" % race)
nodes.append(confirm_node)
if 'bardok' in race:
text = copyreader.read_file("%s/races/bardok_desc.txt" % copy_dir)
race_node = MenuNode("%s" % race, text=text, links=['confirm-bardok', 'race'], linktexts=['Confirm Race Selection', 'Back to Races'])
elif 'erelania' in race:
text = copyreader.read_file("%s/races/erelania_desc.txt" % copy_dir)
race_node = MenuNode("%s" % race, text=text, links=['confirm-erelania', 'race'], linktexts=['Confirm Race Selection', 'Back to Races'])
elif 'gerdling' in race:
text = copyreader.read_file("%s/races/gerdling_desc.txt" % copy_dir)
race_node = MenuNode("%s" % race, text=text, links=['confirm-gerdling', 'race'], linktexts=['Confirm Race Selection', 'Back to Races'])
elif 'earthen' in race:
text = copyreader.read_file("%s/races/earthen_desc.txt" % copy_dir)
race_node = MenuNode("%s" % race, text=text, links=['confirm-earthen', 'race'], linktexts=['Confirm Race Selection', 'Back to Races'])
nodes.append(race_node)
text = copyreader.read_file("%s/races/races_desc.txt" % copy_dir)
root_race_node = MenuNode("%s" % option, text=text, links=['bardok', 'erelania', 'gerdling', 'earthen'], linktexts=['The Bardok', 'The Erelania', 'The Gerdling', 'The Earthen'])
nodes.append(root_race_node)
elif 'deity' in option:
deities = ['ankarith', 'slyth', 'green warden', 'kaylynne']
for deity in deities:
confirm_node = MenuNode('confirm-%s' % deity, links=['gender'], linktexts=['Choose your gender.'], code="self.caller.obj.set_deity('%s')" % deity)
nodes.append(confirm_node)
if 'karith' in deity:
text = copyreader.read_file("%s/deities/ankarith_desc.txt" % copy_dir)
deity_node = MenuNode("%s" % deity, text=text, links=['confirm-ankarith', 'deity'], linktexts=['Confirm Deity Selection', 'Back to Deities'])
#self.obj.msg("links: %s, linktexts: %s" % (deity_node.links, deity_node.linktexts))
elif 'slyth' in deity:
text = copyreader.read_file("%s/deities/slyth_desc.txt" % copy_dir)
deity_node = MenuNode("%s" % deity, text=text, links=['confirm-slyth', 'deity'], linktexts=['Confirm Deity Selection', 'Back to Deities'])
elif 'green warden' in deity:
text = copyreader.read_file("%s/deities/greenwarden_desc.txt" % copy_dir)
deity_node = MenuNode("%s" % deity, text=text, links=['confirm-green warden', 'deity'], linktexts=['Confirm Deity Selection', 'Back to Deities'])
elif 'kaylynne' in deity:
text = copyreader.read_file("%s/deities/kaylynne_desc.txt" % copy_dir)
deity_node = MenuNode("%s" % deity, text=text, links=['confirm-kaylynne', 'deity'], linktexts=['Confirm Deity Selection', 'Back to Deities'])
nodes.append(deity_node)
deity_node_text = copyreader.read_file("%s/deities/deities_desc.txt" % copy_dir)
root_deity_node = MenuNode("deity", text=deity_node_text, links=['ankarith', 'slyth', 'green warden', 'kaylynne'],
linktexts=['An\'Karith', 'Slyth of the Glade', 'The Green Warden', 'Kaylynne'])
nodes.append(root_deity_node)
elif 'gender' in option:
confirm_male = MenuNode("confirm-gender-male", links=['END'], linktexts=["Go forth"], code="self.caller.obj.set_gender('male')")
confirm_female = MenuNode("confirm-gender-female", links=['END'], linktexts=["Go forth"], code="self.caller.obj.set_gender('female')")
nodes.append(confirm_male)
nodes.append(confirm_female)
text = """
--{rGender Selection{n--
Please select which gender you would like to be:
"""
gender_node = MenuNode("gender", text=text, links=['confirm-gender-male', 'confirm-gender-female'],
linktexts=['Male', 'Female'])
nodes.append(gender_node)
start_node = MenuNode("START", text="{bWelcome to Avaloria. Please proceed through the menu to customize your character.{n",
links=['race' ], linktexts=['Choose your race.'])
nodes.append(start_node)
node_string = ' '.join([node.key for node in nodes])
self.caller.msg("{mDEBUG: nodes: %s{n" % node_string)
menutree = MenuTree(caller=self.caller, nodes=nodes)
menutree.start()
class OOCCmdSetCharGen(CmdSet):
"""
Extends the default OOC cmdset.
"""
def at_cmdset_creation(self):
"Install everything from the default set, then overload"
#super(OOCCmdSetCharGen, self).at_cmdset_creation()
self.add(CmdOOCLook())
self.add(CmdOOCCharacterCreate())
|
bsd-3-clause
| 8,105,016,762,496,736,000
| 47.213483
| 194
| 0.601849
| false
| 3.886473
| false
| false
| false
|
lrt512/emol
|
emol/emol/models/privacy_acceptance.py
|
1
|
4199
|
# -*- coding: utf-8 -*-
"""Model to record combatants' acceptance of the privacy policy."""
# standard library imports
# pylint complains about the uuid import but it is used for Required(uuid.UUID)
# pylint: disable=unused-import
import uuid
from datetime import datetime
# third-party imports
from flask import url_for, current_app as app
# application imports
from emol.mail import Emailer
from emol.utility.database import default_uuid
__all__ = ['PrivacyAcceptance']
class PrivacyAcceptance(app.db.Model):
"""Record indicating acceptance of the privacy policy.
When a Combatant record is inserted into the database, the listener
event creates a matching PrivacyAccepted record. Any combatant who has
a PrivacyAccepted record that is not resolved cannot use the system
until they accept the privacy policy
When the combatant accepts the privacy policy, the PrivacyAccepted record
is resolved by noting the datetime that the privacy policy was accepted
If the combatant declines the privacy policy, the Combatant record and the
related PrivacyAcceptance is deleted from the database and the MoL is
informed
Attributes:
id: Identity PK for the table
uuid: A reference to the record with no intrinsic meaning
accepted: Date the combatant accepted the privacy policy
combatant_id: ID of the related combatant
combatant: ORM relationship to the Combatant identified by combatant_id
"""
id = app.db.Column(app.db.Integer, primary_key=True)
combatant_id = app.db.Column(app.db.Integer, app.db.ForeignKey('combatant.id'))
combatant = app.db.relationship(
'Combatant',
backref=app.db.backref('privacy_acceptance', uselist=False, cascade="all, delete-orphan")
)
uuid = app.db.Column(app.db.String(36), default=default_uuid)
accepted = app.db.Column(app.db.DateTime)
@classmethod
def create(cls, combatant, no_email=False):
"""Generate a PrivacyAccepted record for a combatant.
Generates and saves the PrivacyAccepted record, then sends out the
email to prompt the combatant to visit eMoL and read (heh) and accept
the privacy policy
Attributes:
combatant: A combatant
no_email: Should be used for unit testing only
"""
privacy_acceptance = cls(combatant=combatant)
app.db.session.add(privacy_acceptance)
app.db.session.commit()
emailer = Emailer()
emailer.send_privacy_policy_acceptance(privacy_acceptance)
@property
def privacy_policy_url(self):
"""Generate the URL for a user to visit to accept the privacy policy.
Uses the uuid member to uniquely identify this privacy accepted record,
and through it the combatant.
Returns:
String containing the URL
"""
return url_for('privacy_policy.index', uuid=self.uuid, _external=True)
def resolve(self, accepted):
if accepted is True:
# Combatant accepted the privacy policy. Note the time of
# acceptance, generate their card_id and email them the
# link to their card
self.accepted = datetime.utcnow()
self.combatant.generate_card_id()
emailer = Emailer()
emailer.send_card_request(self.combatant)
app.logger.debug('Sent card request email to {0}'.format(
self.combatant.email
))
has_sca_name = self.combatant.sca_name is not None
return {
'accepted': True,
'card_url': self.combatant.card_url,
'has_sca_name': has_sca_name
}
else:
# Combatant declined the privacy policy, delete the Combatant
# record for them and notify the MoL
combatant = self.combatant
app.db.session.delete(self)
app.db.session.delete(combatant)
app.logger.info('Deleted combatant {0}'.format(
self.combatant.email))
# TODO: Notify the MoL
app.db.session.commit()
return {'accepted': False}
|
mit
| 8,481,351,691,787,836,000
| 34.584746
| 97
| 0.656109
| false
| 4.01434
| false
| false
| false
|
openpaul/DNApy
|
output.py
|
1
|
5753
|
#!/usr/bin/python
#This file is part of DNApy. DNApy is a DNA editor written purely in python.
#The program is intended to be an intuitive, fully featured,
#extendable, editor for molecular and synthetic biology.
#Enjoy!
#
#Copyright (C) 2014 Martin Engqvist |
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#LICENSE:
#This file is part of DNApy.
#
#DNApy is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License, or
#(at your option) any later version.
#
#DNApy is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Library General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software Foundation,
#Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#Get source code at: https://github.com/0b0bby0/DNApy
#
import wx.richtext as rt
import wx
from base_class import DNApyBaseClass
#from wx.lib.pubsub import pub
class create(DNApyBaseClass):
'''A class to print colored output to a rich textctrl'''
def __init__(self, parent, style):
super(create, self).__init__(parent, style)
self.rtc = rt.RichTextCtrl(self)
self.rtc.SetEditable(False) #make it not editable
font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Consolas')
self.rtc.SetFont(font)
# self.rtc.Bind(wx.EVT_KEY_DOWN, self.OnKeyPress)
#determing which listening group from which to recieve messages about UI updates
# self.listening_group = 'placeholder'
# pub.Publisher.subscribe(self.listen_to_updateUI, self.listening_group)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(item=self.rtc, proportion=-1, flag=wx.EXPAND)
self.SetSizer(sizer)
####### Modify methods from base class to fit current needs #########
def update_globalUI(self):
'''Method should be modified as to update other panels in response to changes in own panel.
Preferred use is through sending a message using the pub module.
Example use is: pub.Publisher.sendMessage('feature_list_updateUI', '').
The first string is the "listening group" and deterimines which listeners get the message.
The second string is the message and is unimportant for this implementation.
The listening group assigned here (to identify recipients) must be different from the listening group assigned in __init__ (to subscribe to messages).'''
pass
def update_ownUI(self):
'''Updates all fields depending on which feature is chosen'''
pass
#####################################################################
def OnKeyPress(self, evt):
print('keypress')
def clear(self):
'''Remove any text already in the Output Panel'''
self.rtc.Clear()
def write(self, string, stringtype):
'''General method for printing to the Output Panel'''
if stringtype == 'DNA':
self.rtc.BeginTextColour('#009999')
elif stringtype == 'DNAcolor':
#this is too slow!
self.rtc.WriteText(string)
color = '#333333'
self.rtc.attr.SetTextColour(color)
self.rtc.SetStyleEx(rt.RichTextRange(insertionpoint, insertionpoint+30), self.attr)
previousbase = ''
string=string.upper()
i = 0
[(self.rtc.attr.SetTextColour('#33CC00'), self.rtc.SetStyleEx(rt.RichTextRange(insertionpoint + i, insertionpoint + i+1), self.rtc.attr)) for base in string for i in xrange(len(string)) if base =='A']
# for base in string:
# start = insertionpoint + i
# end = start + 1
# if base == '-': color = '#000000'
# elif base == 'A': color = '#33CC00'
# elif base == 'T': color = '#CC0000'
# elif base == 'C': color = '#0066CC'
# elif base == 'G': color = '#000000'
# elif base == 'N': color = '#FF00CC'
# else: color = '#FF6600'
#
#
# self.attr.SetTextColour(color)
# self.SetStyleEx(rt.RichTextRange(start, end), self.attr)
# i += 1
elif stringtype == 'Protein':
self.rtc.BeginTextColour('#CC6600')
elif stringtype == 'Text':
self.rtc.BeginTextColour('#333333')
elif stringtype == 'File':
self.rtc.BeginTextColour('#330099')
elif stringtype == 'Barcode':
self.rtc.BeginTextColour('#FF00FF')
self.rtc.WriteText(string)
self.rtc.EndTextColour()
if stringtype == 'Replace':
self.rtc.BeginTextColour('#333333')
self.rtc.SetValue(string)
self.rtc.EndTextColour()
def write_image(self, image):
'''General method for printing images to the Output Panel'''
pass
# self.WriteImage(images._rt_smiley.GetImage())
#
# bool WriteBitmap(self, bitmap, bitmapType)
# Write a bitmap at the current insertion point.
# bool WriteImage(self, image, bitmapType)
# Write an image at the current insertion point.
# bool WriteImageBlock(self, imageBlock)
# Write an image block at the current insertion point.
# bool WriteImageFile(self, filename, bitmapType)
# Load an image from file and write at the current insertion point.
if __name__ == '__main__': #if script is run by itself and not loaded
app = wx.App() # creation of the wx.App object (initialisation of the wxpython toolkit)
frame = wx.Frame(None, title="Output Panel") # creation of a Frame with a title
frame.output = create(frame, style=wx.VSCROLL|wx.HSCROLL) # creation of a richtextctrl in the frame
frame.output.write('CACC', 'DNA') #testing..
frame.Show() # frames are invisible by default so we use Show() to make them visible
app.MainLoop() # here the app enters a loop waiting for user input
|
gpl-3.0
| 1,293,076,373,429,063,700
| 34.732919
| 204
| 0.682079
| false
| 3.348661
| false
| false
| false
|
aino/django-aislug
|
tests/aislug_tests/models.py
|
1
|
1161
|
from aislug import AISlugField
from django.db import models
class Item(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField()
class ItemUpdateFalse(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField(update=False)
class ItemSlugify(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField(slugify=lambda x: x)
class ItemInvalidList(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField(invalid=['invalid'])
class ItemInvalidCallback(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField(invalid=lambda: ['invalid'])
class ItemPopulateFromProperty(models.Model):
name = models.CharField(max_length=100)
slug = AISlugField(populate_from='name')
class ItemPopulateFromMethod(models.Model):
name = models.CharField(max_length=100)
slug = AISlugField(populate_from='meth')
def meth(self):
return self.name
class ItemUniqueFor(models.Model):
title = models.CharField(max_length=100)
category = models.CharField(max_length=100)
slug = AISlugField(unique_for=['category'])
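

# --- Hedged usage sketch (added for illustration; behaviour is inferred from the option
# names above rather than taken from aislug's own docs, and a configured test database
# is assumed). ---
def _slug_sketch():
    item = Item.objects.create(title='Hello World')
    assert item.slug  # filled in automatically from `title`
    by_name = ItemPopulateFromProperty.objects.create(name='Hello World')
    assert by_name.slug  # filled in from `name` via populate_from
    return item.slug, by_name.slug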
|
bsd-3-clause
| 2,764,682,284,138,301,000
| 28.025
| 51
| 0.727821
| false
| 3.414706
| false
| false
| false
|
Eric89GXL/vispy
|
examples/demo/gloo/grayscott.py
|
2
|
7375
|
# -*- coding: utf-8 -*-
# vispy: gallery 2000
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# Author: Nicolas P. Rougier
# Date: 06/03/2014
# Abstract: GPU computing using the framebuffer
# Keywords: framebuffer, GPU computing, reaction-diffusion
# -----------------------------------------------------------------------------
from __future__ import division
import numpy as np
from vispy.gloo import (Program, FrameBuffer, RenderBuffer, set_viewport,
clear, set_state)
from vispy import app
render_vertex = """
attribute vec2 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
v_texcoord = texcoord;
}
"""
render_fragment = """
uniform int pingpong;
uniform sampler2D texture;
varying vec2 v_texcoord;
void main()
{
float v;
if( pingpong == 0 )
v = texture2D(texture, v_texcoord).r;
else
v = texture2D(texture, v_texcoord).b;
gl_FragColor = vec4(1.0-v, 1.0-v, 1.0-v, 1.0);
}
"""
compute_vertex = """
attribute vec2 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
v_texcoord = texcoord;
}
"""
compute_fragment = """
uniform int pingpong;
uniform sampler2D texture; // U,V:= r,g, other channels ignored
uniform sampler2D params; // rU,rV,f,k := r,g,b,a
uniform float dx; // horizontal distance between texels
uniform float dy; // vertical distance between texels
uniform float dd; // unit of distance
uniform float dt; // unit of time
varying vec2 v_texcoord;
void main(void)
{
float center = -(4.0+4.0/sqrt(2.0)); // -1 * other weights
float diag = 1.0/sqrt(2.0); // weight for diagonals
vec2 p = v_texcoord; // center coordinates
vec2 c,l;
if( pingpong == 0 ) {
c = texture2D(texture, p).rg; // central value
// Compute Laplacian
l = ( texture2D(texture, p + vec2(-dx,-dy)).rg
+ texture2D(texture, p + vec2( dx,-dy)).rg
+ texture2D(texture, p + vec2(-dx, dy)).rg
+ texture2D(texture, p + vec2( dx, dy)).rg) * diag
+ texture2D(texture, p + vec2(-dx, 0.0)).rg
+ texture2D(texture, p + vec2( dx, 0.0)).rg
+ texture2D(texture, p + vec2(0.0,-dy)).rg
+ texture2D(texture, p + vec2(0.0, dy)).rg
+ c * center;
} else {
c = texture2D(texture, p).ba; // central value
// Compute Laplacian
l = ( texture2D(texture, p + vec2(-dx,-dy)).ba
+ texture2D(texture, p + vec2( dx,-dy)).ba
+ texture2D(texture, p + vec2(-dx, dy)).ba
+ texture2D(texture, p + vec2( dx, dy)).ba) * diag
+ texture2D(texture, p + vec2(-dx, 0.0)).ba
+ texture2D(texture, p + vec2( dx, 0.0)).ba
+ texture2D(texture, p + vec2(0.0,-dy)).ba
+ texture2D(texture, p + vec2(0.0, dy)).ba
+ c * center;
}
float u = c.r; // compute some temporary
float v = c.g; // values which might save
float lu = l.r; // a few GPU cycles
float lv = l.g;
float uvv = u * v * v;
vec4 q = texture2D(params, p).rgba;
float ru = q.r; // rate of diffusion of U
float rv = q.g; // rate of diffusion of V
float f = q.b; // some coupling parameter
float k = q.a; // another coupling parameter
float du = ru * lu / dd - uvv + f * (1.0 - u); // Gray-Scott equation
float dv = rv * lv / dd + uvv - (f + k) * v; // diffusion+-reaction
u += du * dt;
v += dv * dt;
if( pingpong == 1 ) {
gl_FragColor = vec4(clamp(u, 0.0, 1.0), clamp(v, 0.0, 1.0), c);
} else {
gl_FragColor = vec4(c, clamp(u, 0.0, 1.0), clamp(v, 0.0, 1.0));
}
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, title='Grayscott Reaction-Diffusion',
size=(512, 512), keys='interactive')
self.scale = 4
self.comp_size = self.size
comp_w, comp_h = self.comp_size
dt = 1.0
dd = 1.5
species = {
# name : [r_u, r_v, f, k]
'Bacteria 1': [0.16, 0.08, 0.035, 0.065],
'Bacteria 2': [0.14, 0.06, 0.035, 0.065],
'Coral': [0.16, 0.08, 0.060, 0.062],
'Fingerprint': [0.19, 0.05, 0.060, 0.062],
'Spirals': [0.10, 0.10, 0.018, 0.050],
'Spirals Dense': [0.12, 0.08, 0.020, 0.050],
'Spirals Fast': [0.10, 0.16, 0.020, 0.050],
'Unstable': [0.16, 0.08, 0.020, 0.055],
'Worms 1': [0.16, 0.08, 0.050, 0.065],
'Worms 2': [0.16, 0.08, 0.054, 0.063],
'Zebrafish': [0.16, 0.08, 0.035, 0.060]
}
P = np.zeros((comp_h, comp_w, 4), dtype=np.float32)
P[:, :] = species['Unstable']
UV = np.zeros((comp_h, comp_w, 4), dtype=np.float32)
UV[:, :, 0] = 1.0
r = 32
UV[comp_h // 2 - r:comp_h // 2 + r,
comp_w // 2 - r:comp_w // 2 + r, 0] = 0.50
UV[comp_h // 2 - r:comp_h // 2 + r,
comp_w // 2 - r:comp_w // 2 + r, 1] = 0.25
UV += np.random.uniform(0.0, 0.01, (comp_h, comp_w, 4))
UV[:, :, 2] = UV[:, :, 0]
UV[:, :, 3] = UV[:, :, 1]
self.pingpong = 1
self.compute = Program(compute_vertex, compute_fragment, 4)
self.compute["params"] = P
self.compute["texture"] = UV
self.compute["position"] = [(-1, -1), (-1, +1), (+1, -1), (+1, +1)]
self.compute["texcoord"] = [(0, 0), (0, 1), (1, 0), (1, 1)]
self.compute['dt'] = dt
self.compute['dx'] = 1.0 / comp_w
self.compute['dy'] = 1.0 / comp_h
self.compute['dd'] = dd
self.compute['pingpong'] = self.pingpong
self.render = Program(render_vertex, render_fragment, 4)
self.render["position"] = [(-1, -1), (-1, +1), (+1, -1), (+1, +1)]
self.render["texcoord"] = [(0, 0), (0, 1), (1, 0), (1, 1)]
self.render["texture"] = self.compute["texture"]
self.render['pingpong'] = self.pingpong
self.fbo = FrameBuffer(self.compute["texture"],
RenderBuffer(self.comp_size))
set_state(depth_test=False, clear_color='black')
self._timer = app.Timer('auto', connect=self.update, start=True)
self.show()
def on_draw(self, event):
with self.fbo:
set_viewport(0, 0, *self.comp_size)
self.compute["texture"].interpolation = 'nearest'
self.compute.draw('triangle_strip')
clear(color=True)
set_viewport(0, 0, *self.physical_size)
self.render["texture"].interpolation = 'linear'
self.render.draw('triangle_strip')
self.pingpong = 1 - self.pingpong
self.compute["pingpong"] = self.pingpong
self.render["pingpong"] = self.pingpong
def on_resize(self, event):
set_viewport(0, 0, *self.physical_size)
if __name__ == '__main__':
canvas = Canvas()
app.run()
|
bsd-3-clause
| -7,063,744,697,492,145,000
| 34.119048
| 79
| 0.510644
| false
| 3.072917
| false
| false
| false
|
stonemary/lintcode_solutions
|
search-a-2d-matrix/1.py
|
1
|
1214
|
# iterative
# time: went over the time limit; there was a bug in the helper function. ~18 mins
class Solution:
"""
@param matrix, a list of lists of integers
@param target, an integer
@return a boolean, indicate whether matrix contains target
"""
def searchMatrix(self, matrix, target):
        if not matrix or not matrix[0]:
return False
# width: m, height: n
n = len(matrix)
m = len(matrix[0])
start = 0
end = m * n - 1
mid = (start + end) / 2
while start + 1 < end:
mid_value = self.get_element(matrix, mid, m)
if mid_value == target:
return True
if mid_value < target:
start = mid
if mid_value > target:
end = mid
mid = (start + end) / 2
else:
if self.get_element(matrix, start, m) == target:
return True
if self.get_element(matrix, end, m) == target:
return True
return False
def get_element(self, matrix, i, m):
index_1 = i / m
index_2 = i % m
return matrix[index_1][index_2]
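

# Hedged usage sketch (added for illustration): the matrix is treated as one sorted
# list of length m*n, so a single binary search over flat indices is enough.
if __name__ == '__main__':
    sample = [[1, 3, 5, 7],
              [10, 11, 16, 20],
              [23, 30, 34, 50]]
    print(Solution().searchMatrix(sample, 3))   # True
    print(Solution().searchMatrix(sample, 15))  # False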
|
apache-2.0
| 3,740,729,000,668,610,000
| 27.904762
| 62
| 0.476936
| false
| 4.229965
| false
| false
| false
|
OmkarPathak/pygorithm
|
pygorithm/data_structures/quadtree.py
|
1
|
23006
|
"""
Author: Timothy Moore
Created On: 31th August 2017
Defines a two-dimensional quadtree of arbitrary
depth and bucket size.
"""
import inspect
import math
from collections import deque
from pygorithm.geometry import (vector2, polygon2, rect2)
class QuadTreeEntity(object):
"""
This is the minimum information required for an object to
be usable in a quadtree as an entity. Entities are the
things that you are trying to compare in a quadtree.
:ivar aabb: the axis-aligned bounding box of this entity
:type aabb: :class:`pygorithm.geometry.rect2.Rect2`
"""
def __init__(self, aabb):
"""
Create a new quad tree entity with the specified aabb
:param aabb: axis-aligned bounding box
:type aabb: :class:`pygorithm.geometry.rect2.Rect2`
"""
self.aabb = aabb
def __repr__(self):
"""
Create an unambiguous representation of this entity.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
_ent = quadtree.QuadTreeEntity(rect2.Rect2(5, 5))
# prints quadtreeentity(aabb=rect2(width=5, height=5, mincorner=vector2(x=0, y=0)))
print(repr(_ent))
:returns: unambiguous representation of this quad tree entity
:rtype: string
"""
return "quadtreeentity(aabb={})".format(repr(self.aabb))
def __str__(self):
"""
Create a human readable representation of this entity
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
_ent = quadtree.QuadTreeEntity(rect2.Rect2(5, 5))
# prints entity(at rect(5x5 at <0, 0>))
print(str(_ent))
:returns: human readable representation of this entity
:rtype: string
"""
return "entity(at {})".format(str(self.aabb))
class QuadTree(object):
"""
A quadtree is a sorting tool for two-dimensional space, most
commonly used to reduce the number of required collision
calculations in a two-dimensional scene. In this context,
the scene is stepped without collision detection, then a
quadtree is constructed from all of the boundaries
.. caution::
Just because a quad tree has split does not mean entities will be empty. Any
entities which overlay any of the lines of the split will be included in the
parent of the quadtree.
.. tip::
It is important to tweak bucket size and depth to the problem, but a common error
is too small a bucket size. It is typically not reasonable to have a bucket size
        smaller than 16; a good starting point is 64, then modify as appropriate. Larger
buckets reduce the overhead of the quad tree which could easily exceed the improvement
from reduced collision checks. The max depth is typically just a sanity check since
depth greater than 4 or 5 would either indicate a badly performing quadtree (too
dense objects, use an r-tree or kd-tree) or a very large world (where an iterative
quadtree implementation would be appropriate).
    :ivar bucket_size: maximum number of objects per bucket (before :py:attr:`.max_depth`)
:type bucket_size: int
:ivar max_depth: maximum depth of the quadtree
:type max_depth: int
:ivar depth: the depth of this node (0 being the topmost)
:type depth: int
:ivar location: where this quad tree node is situated
:type location: :class:`pygorithm.geometry.rect2.Rect2`
:ivar entities: the entities in this quad tree and in NO OTHER related quad tree
:type entities: list of :class:`.QuadTreeEntity`
:ivar children: either None or the 4 :class:`.QuadTree` children of this node
:type children: None or list of :class:`.QuadTree`
"""
def __init__(self, bucket_size, max_depth, location, depth = 0, entities = None):
"""
Initialize a new quad tree.
.. warning::
Passing entities to this quadtree will NOT cause it to split automatically!
You must call :py:meth:`.think` for that. This allows for more predictable
performance per line.
        :param bucket_size: the maximum number of entities per bucket
:type bucket_size: int
:param max_depth: the maximum depth for automatic splitting
:type max_depth: int
:param location: where this quadtree is located
:type location: :class:`pygorithm.geometry.rect2.Rect2`
:param depth: the depth of this node
:type depth: int
:param entities: the entities to initialize this quadtree with
:type entities: list of :class:`.QuadTreeEntity` or None for empty list
"""
self.bucket_size = bucket_size
self.max_depth = max_depth
self.location = location
self.depth = depth
self.entities = entities if entities is not None else []
self.children = None
def think(self, recursive = False):
"""
Call :py:meth:`.split` if appropriate
Split this quad tree if it has not split already and it has more
entities than :py:attr:`.bucket_size` and :py:attr:`.depth` is
less than :py:attr:`.max_depth`.
If `recursive` is True, think is called on the :py:attr:`.children` with
recursive set to True after splitting.
:param recursive: if `think(True)` should be called on :py:attr:`.children` (if there are any)
:type recursive: bool
"""
if not self.children and self.depth < self.max_depth and len(self.entities) > self.bucket_size:
self.split()
if recursive:
if self.children:
for child in self.children:
child.think(True)
def split(self):
"""
Split this quadtree.
.. caution::
A call to split will always split the tree or raise an error. Use
:py:meth:`.think` if you want to ensure the quadtree is operating
efficiently.
.. caution::
This function will not respect :py:attr:`.bucket_size` or
:py:attr:`.max_depth`.
:raises ValueError: if :py:attr:`.children` is not empty
"""
if self.children:
raise ValueError("cannot split twice")
_cls = type(self)
def _cstr(r):
return _cls(self.bucket_size, self.max_depth, r, self.depth + 1)
_halfwidth = self.location.width / 2
_halfheight = self.location.height / 2
_x = self.location.mincorner.x
_y = self.location.mincorner.y
self.children = [
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x, _y))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x + _halfwidth, _y))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x + _halfwidth, _y + _halfheight))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x, _y + _halfheight))) ]
_newents = []
for ent in self.entities:
quad = self.get_quadrant(ent)
if quad < 0:
_newents.append(ent)
else:
self.children[quad].entities.append(ent)
self.entities = _newents
def get_quadrant(self, entity):
"""
Calculate the quadrant that the specified entity belongs to.
Touching a line is considered overlapping a line. Touching is
        determined using :py:func:`math.isclose`
Quadrants are:
- -1: None (it overlaps 2 or more quadrants)
- 0: Top-left
- 1: Top-right
- 2: Bottom-right
- 3: Bottom-left
.. caution::
This function does not verify the entity is contained in this quadtree.
This operation takes O(1) time.
:param entity: the entity to place
:type entity: :class:`.QuadTreeEntity`
:returns: quadrant
:rtype: int
"""
_aabb = entity.aabb
_halfwidth = self.location.width / 2
_halfheight = self.location.height / 2
_x = self.location.mincorner.x
_y = self.location.mincorner.y
if math.isclose(_aabb.mincorner.x, _x + _halfwidth):
return -1
if math.isclose(_aabb.mincorner.x + _aabb.width, _x + _halfwidth):
return -1
if math.isclose(_aabb.mincorner.y, _y + _halfheight):
return -1
if math.isclose(_aabb.mincorner.y + _aabb.height, _y + _halfheight):
return -1
_leftside_isleft = _aabb.mincorner.x < _x + _halfwidth
_rightside_isleft = _aabb.mincorner.x + _aabb.width < _x + _halfwidth
if _leftside_isleft != _rightside_isleft:
return -1
_topside_istop = _aabb.mincorner.y < _y + _halfheight
_botside_istop = _aabb.mincorner.y + _aabb.height < _y + _halfheight
if _topside_istop != _botside_istop:
return -1
_left = _leftside_isleft
_top = _topside_istop
if _left:
if _top:
return 0
else:
return 3
else:
if _top:
return 1
else:
return 2
def insert_and_think(self, entity):
"""
Insert the entity into this or the appropriate child.
        This also acts as thinking (recursively). Using :py:meth:`.insert_and_think`
        iteratively is slightly less efficient but has more predictable performance
        than initializing with a large number of entities and then thinking, which is
        slightly faster but may hang. Both may exceed the recursion depth if
        :py:attr:`.max_depth` is too large.
:param entity: the entity to insert
:type entity: :class:`.QuadTreeEntity`
"""
if not self.children and len(self.entities) == self.bucket_size and self.depth < self.max_depth:
self.split()
quad = self.get_quadrant(entity) if self.children else -1
if quad < 0:
self.entities.append(entity)
else:
self.children[quad].insert_and_think(entity)
def retrieve_collidables(self, entity, predicate = None):
"""
Find all entities that could collide with the specified entity.
.. warning::
If entity is, itself, in the quadtree, it will be returned. The
predicate may be used to prevent this using your preferred equality
method.
The predicate takes 1 positional argument (the entity being considered)
and returns `False` if the entity should never be returned, even if it
might collide with the entity. It should return `True` otherwise.
:param entity: the entity to find collidables for
:type entity: :class:`.QuadTreeEntity`
:param predicate: the predicate
:type predicate: :class:`types.FunctionType` or None
        :returns: potential collidables (never `None`)
:rtype: list of :class:`.QuadTreeEntity`
"""
result = list(filter(predicate, self.entities))
quadrant = self.get_quadrant(entity) if self.children else -1
if quadrant >= 0:
result.extend(self.children[quadrant].retrieve_collidables(entity, predicate))
elif self.children:
for child in self.children:
touching, overlapping, alwaysNone = rect2.Rect2.find_intersection(entity.aabb, child.location, find_mtv=False)
if touching or overlapping:
result.extend(child.retrieve_collidables(entity, predicate))
return result
def _iter_helper(self, pred):
"""
Calls pred on each child and childs child, iteratively.
pred takes one positional argument (the child).
:param pred: function to call
:type pred: `types.FunctionType`
"""
_stack = deque()
_stack.append(self)
while _stack:
curr = _stack.pop()
if curr.children:
for child in curr.children:
_stack.append(child)
pred(curr)
def find_entities_per_depth(self):
"""
Calculate the number of nodes and entities at each depth level in this
quad tree. Only returns for depth levels at or equal to this node.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: dict of depth level to number of entities
:rtype: dict int: int
"""
container = { 'result': {} }
def handler(curr, container=container):
container['result'][curr.depth] = container['result'].get(curr.depth, 0) + len(curr.entities)
self._iter_helper(handler)
return container['result']
def find_nodes_per_depth(self):
"""
Calculate the number of nodes at each depth level.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: dict of depth level to number of nodes
:rtype: dict int: int
"""
nodes_per_depth = {}
self._iter_helper(lambda curr, d=nodes_per_depth: d.update({ (curr.depth, d.get(curr.depth, 0) + 1) }))
return nodes_per_depth
def sum_entities(self, entities_per_depth=None):
"""
Sum the number of entities in this quad tree and all lower quad trees.
If `entities_per_depth` is not None, that array is used to calculate the sum
of entities rather than traversing the tree. Either way, this is implemented
iteratively. See :py:meth:`.__str__` for usage example.
:param entities_per_depth: the result of :py:meth:`.find_entities_per_depth`
:type entities_per_depth: `dict int: (int, int)` or None
:returns: number of entities in this and child nodes
:rtype: int
"""
if entities_per_depth is not None:
return sum(entities_per_depth.values())
container = { 'result': 0 }
def handler(curr, container=container):
container['result'] += len(curr.entities)
self._iter_helper(handler)
return container['result']
def calculate_avg_ents_per_leaf(self):
"""
Calculate the average number of entities per leaf node on this and child
quad trees.
In the ideal case, the average entities per leaf is equal to the bucket size,
implying maximum efficiency. Note that, as always with averages, this might
be misleading if this tree has reached its max depth.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: average number of entities at each leaf node
:rtype: :class:`numbers.Number`
"""
container = { 'leafs': 0, 'total': 0 }
def handler(curr, container=container):
if not curr.children:
container['leafs'] += 1
container['total'] += len(curr.entities)
self._iter_helper(handler)
return container['total'] / container['leafs']
def calculate_weight_misplaced_ents(self, sum_entities=None):
"""
Calculate a rating for misplaced entities.
        A misplaced entity is one that is not on a leaf node. The number of misplaced
        entities on a node is multiplied by 4 * (the remaining maximum depth below that
        node) to indicate approximately how many additional calculations are required.
The result is then divided by the total number of entities on this node (either
calculated using :py:meth:`.sum_entities` or provided) to get the approximate
cost of the misplaced nodes in comparison with the placed nodes. A value greater
than 1 implies a different tree type (such as r-tree or kd-tree) should probably be
used.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:param sum_entities: the number of entities on this node
:type sum_entities: int or None
:returns: weight of misplaced entities
:rtype: :class:`numbers.Number`
"""
# this iteration requires more context than _iter_helper provides.
# we must keep track of parents as well in order to correctly update
# weights
nonleaf_to_max_child_depth_dict = {}
# stack will be (quadtree, list (of parents) or None)
_stack = deque()
_stack.append((self, None))
while _stack:
curr, parents = _stack.pop()
if parents:
for p in parents:
nonleaf_to_max_child_depth_dict[p] = max(nonleaf_to_max_child_depth_dict.get(p, 0), curr.depth)
if curr.children:
new_parents = list(parents) if parents else []
new_parents.append(curr)
for child in curr.children:
_stack.append((child, new_parents))
_weight = 0
for nonleaf, maxchilddepth in nonleaf_to_max_child_depth_dict.items():
_weight += len(nonleaf.entities) * 4 * (maxchilddepth - nonleaf.depth)
_sum = self.sum_entities() if sum_entities is None else sum_entities
return _weight / _sum
def __repr__(self):
"""
Create an unambiguous representation of this quad tree.
This is implemented iteratively.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
# create a tree with a up to 2 entities in a bucket that
# can have a depth of up to 5.
_tree = quadtree.QuadTree(1, 5, rect2.Rect2(100, 100))
# add a few entities to the tree
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5))))
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5))))
# prints quadtree(bucket_size=1, max_depth=5, location=rect2(width=100, height=100, mincorner=vector2(x=0, y=0)), depth=0, entities=[], children=[quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=5, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=95, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=50.0)), depth=1, entities=[], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=50.0)), depth=1, entities=[], children=None)])
:returns: unambiguous, recursive representation of this quad tree
:rtype: string
"""
return "quadtree(bucket_size={}, max_depth={}, location={}, depth={}, entities={}, children={})".format(self.bucket_size, self.max_depth, repr(self.location), self.depth, self.entities, self.children)
def __str__(self):
"""
Create a human-readable representation of this quad tree
.. caution::
Because of the complexity of quadtrees it takes a fair amount of calculation to
produce something somewhat legible. All returned statistics have paired functions.
This uses only iterative algorithms to calculate statistics.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
# create a tree with a up to 2 entities in a bucket that
# can have a depth of up to 5.
_tree = quadtree.QuadTree(2, 5, rect2.Rect2(100, 100))
# add a few entities to the tree
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5))))
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5))))
# prints quadtree(at rect(100x100 at <0, 0>) with 0 entities here (2 in total); (nodes, entities) per depth: [ 0: (1, 0), 1: (4, 2) ] (allowed max depth: 5, actual: 1), avg ent/leaf: 0.5 (target 1), misplaced weight 0.0 (0 best, >1 bad)
print(_tree)
:returns: human-readable representation of this quad tree
:rtype: string
"""
nodes_per_depth = self.find_nodes_per_depth()
_ents_per_depth = self.find_entities_per_depth()
_nodes_ents_per_depth_str = "[ {} ]".format(', '.join("{}: ({}, {})".format(dep, nodes_per_depth[dep], _ents_per_depth[dep]) for dep in nodes_per_depth.keys()))
_sum = self.sum_entities(entities_per_depth=_ents_per_depth)
_max_depth = max(_ents_per_depth.keys())
_avg_ent_leaf = self.calculate_avg_ents_per_leaf()
_mispl_weight = self.calculate_weight_misplaced_ents(sum_entities=_sum)
return "quadtree(at {} with {} entities here ({} in total); (nodes, entities) per depth: {} (allowed max depth: {}, actual: {}), avg ent/leaf: {} (target {}), misplaced weight {} (0 best, >1 bad)".format(self.location, len(self.entities), _sum, _nodes_ents_per_depth_str, self.max_depth, _max_depth, _avg_ent_leaf, self.bucket_size, _mispl_weight)
@staticmethod
def get_code():
"""
Get the code for the QuadTree class
:returns: code for QuadTree
:rtype: string
"""
return inspect.getsource(QuadTree)
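

# --- Hedged usage sketch (added for illustration; mirrors the docstring examples above) ---
def _quadtree_demo():
    tree = QuadTree(bucket_size=2, max_depth=5, location=rect2.Rect2(100, 100))
    tree.insert_and_think(QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5))))
    tree.insert_and_think(QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5))))
    tree.insert_and_think(QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(6, 6))))
    probe = QuadTreeEntity(rect2.Rect2(4, 4, vector2.Vector2(4, 4)))
    # returns entities held on this node plus any child quadrant the probe touches
    return tree.retrieve_collidables(probe)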
|
mit
| 3,215,054,708,170,512,000
| 39.937722
| 899
| 0.586325
| false
| 4.189765
| false
| false
| false
|
maheshgaya/lips-with-maps
|
machine-learning/python/first_model.py
|
1
|
4736
|
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn import preprocessing
# importing data and munging
constant_data = pd.read_csv('full_library_xt875.csv')
#normalizing data
#normalization = lambda df: (df - df.mean()) / (df.max() - df.min())
#constant_data = normalization(constant_data)
t_data = constant_data[:2787]
pred_data = t_data[['xPosition', 'yPosition']]
t_data = t_data.drop('Latitude', 1)
t_data = t_data.drop('Longitude', 1)
t_data = t_data.drop('xPosition', 1)
t_data = t_data.drop('yPosition', 1)
tp_data = constant_data[2789:]
pred_t_data = tp_data[['xPosition', 'yPosition']]
tp_data = tp_data.drop('Latitude', 1)
tp_data = tp_data.drop('Longitude', 1)
tp_data = tp_data.drop('xPosition', 1)
tp_data = tp_data.drop('yPosition', 1)
example = constant_data[2789:2791]
testing = example[['xPosition', 'yPosition']]
example = example.drop('Latitude',1)
example = example.drop('Longitude',1)
example = example.drop('xPosition',1)
example = example.drop('yPosition',1)
print(len(tp_data))
print(pred_data)
# parameters
learning_rate = 0.005
training_epochs = 100000
batch_size = 100
display_step = 1
# network parameters
n_input = 170
n_classes = 2
n_hidden_1 = 86
n_hidden_2 = 52
n_hidden_3 = 21
n_hidden_4 = 13
#tf Graph input
x = tf.placeholder('float', [None, n_input])
y = tf.placeholder('float', [None, n_classes])
#create model
def multilayer_perceptron(x, weights, biases):
# Hidden layer with relu activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with relu activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Hidden layer with relu activation
layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
layer_3 = tf.nn.relu(layer_3)
# Hidden layer with relu activation
'''layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
layer_4 = tf.nn.relu(layer_4)'''
#output layer with linear activation
out_layer = tf.matmul(layer_3, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
#'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
'out': tf.Variable(tf.random_normal([n_hidden_3, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'b3': tf.Variable(tf.random_normal([n_hidden_3])),
#'b4': tf.Variable(tf.random_normal([n_hidden_4])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
'''Convergence occurs at a loss of about 779.46.
Normalizing the data may reduce this error; see the
commented-out normalization above.
'''
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred-y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
saver = tf.train.Saver()
# Initializing the variables
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
total_batch_test = int(len(t_data)/batch_size)
#training cycle
for epoch in range(training_epochs):
ptr = 0
avg_cost = 0.0
total_batch = int(len(t_data)/batch_size)
#loop over all batches
for i in range(total_batch):
            inp, out = t_data[ptr:ptr+batch_size], pred_data[ptr:ptr+batch_size]
            ptr += batch_size
_, c = sess.run([optimizer, cost], feed_dict={x: inp, y: out})
# Compute average loss
avg_cost += c / total_batch
#print(avg_cost)
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%06d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost))
if avg_cost <= 0.11 and epoch > 100:
break
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#print("Accuracy:", accuracy.eval({x: tp_data[0:length],
# y: pred_t_data[0:length]}))
feed_dict = {x: example, y: testing}
#classi = pred.eval(feed_dict)
print(sess.run(pred, feed_dict))
saver.save(sess, '/Users/Joel/Desktop/Research/lgps.ckpt')
# Ignore Latitude and Longitude
# Predict a formula for converting x and y position in order to infer Latitude and Longitude
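
# --- Hedged sketch (added for illustration): min-max scaling of the feature columns, as
# --- suggested by the commented-out normalization above. Not wired into the model here. ---
def normalize_features(df):
    """Scale every column of a feature DataFrame into [0, 1] with sklearn's MinMaxScaler."""
    scaler = preprocessing.MinMaxScaler()
    return pd.DataFrame(scaler.fit_transform(df), columns=df.columns, index=df.index)
# Example (assumed usage): t_data = normalize_features(t_data); tp_data = normalize_features(tp_data)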
|
apache-2.0
| -4,449,675,825,437,727,000
| 29.753247
| 82
| 0.644848
| false
| 3.018483
| false
| false
| false
|
mablae/weblate
|
weblate/trans/south_migrations/0027_auto__chg_field_subproject_template.py
|
1
|
15556
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
depends_on = (
('lang', '0003_auto__add_field_language_plural_type'),
)
def forwards(self, orm):
# Changing field 'SubProject.template'
db.alter_column('trans_subproject', 'template', self.gf('django.db.models.fields.CharField')(max_length=200, null=False))
def backwards(self, orm):
# Changing field 'SubProject.template'
db.alter_column('trans_subproject', 'template', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lang.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'direction': ('django.db.models.fields.CharField', [], {'default': "'ltr'", 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'nplurals': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'plural_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'pluralequation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'trans.change': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'Change'},
'action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Translation']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Unit']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'trans.check': {
'Meta': {'object_name': 'Check'},
'check': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignore': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"})
},
'trans.comment': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Comment'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'trans.dictionary': {
'Meta': {'ordering': "['source']", 'object_name': 'Dictionary'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'trans.indexupdate': {
'Meta': {'object_name': 'IndexUpdate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Unit']"})
},
'trans.project': {
'Meta': {'ordering': "['name']", 'object_name': 'Project'},
'commit_message': ('django.db.models.fields.TextField', [], {'default': "'Translated using Weblate (%(language_name)s)\\n\\nCurrently translated at %(translated_percent)s%% (%(translated)s of %(total)s strings)'"}),
'committer_email': ('django.db.models.fields.EmailField', [], {'default': "'noreply@weblate.org'", 'max_length': '75'}),
'committer_name': ('django.db.models.fields.CharField', [], {'default': "'Weblate'", 'max_length': '200'}),
'enable_acl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'merge_style': ('django.db.models.fields.CharField', [], {'default': "'merge'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'new_lang': ('django.db.models.fields.CharField', [], {'default': "'contact'", 'max_length': '10'}),
'push_on_commit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'set_translation_team': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'web': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'trans.subproject': {
'Meta': {'ordering': "['project__name', 'name']", 'unique_together': "(('project', 'name'), ('project', 'slug'))", 'object_name': 'SubProject'},
'allow_translation_propagation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'branch': ('django.db.models.fields.CharField', [], {'default': "'master'", 'max_length': '50'}),
'file_format': ('django.db.models.fields.CharField', [], {'default': "'auto'", 'max_length': '50'}),
'filemask': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'push': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'report_source_bugs': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'repoweb': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
'trans.suggestion': {
'Meta': {'object_name': 'Suggestion'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'target': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'trans.translation': {
'Meta': {'ordering': "['language__name']", 'object_name': 'Translation'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'fuzzy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'}),
'lock_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'lock_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'subproject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.SubProject']"}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'translated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'})
},
'trans.unit': {
'Meta': {'ordering': "['position']", 'object_name': 'Unit'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'flags': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'fuzzy': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'previous_source': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {}),
'target': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'translated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Translation']"})
}
}
complete_apps = ['trans']
|
gpl-3.0
| -2,850,618,282,719,229,000
| 74.868293
| 227
| 0.553462
| false
| 3.695177
| false
| false
| false
|
NoRedInk/elm-ops-tooling
|
elm_deps_upgrade.py
|
1
|
4701
|
#! /usr/bin/env python
from __future__ import print_function
import sys
import json
import requests
import struct
import argparse
def load_all_packages(elm_version, url=None):
if url is None:
url = "http://package.elm-lang.org/all-packages?elm-package-version="
payload = requests.get("{url}{elm_version}".format(
url=url,
elm_version=elm_version
))
return { item['name'] : item for item in payload.json() }
def load_versions(package_name, url=None):
if url is None:
url = "http://package.elm-lang.org/versions?name="
payload = requests.get("{url}{package_name}".format(
url=url,
package_name=package_name
))
return payload.content
def load_local_packages(elm_package):
with open(elm_package) as f:
return json.load(f)['dependencies']
def top_range(field):
only_end = field[field.index('v'):]
if '=' in only_end:
return only_end.split('=')[-1].strip()
if '<' in only_end:
number = only_end.split('<')[-1].strip()
if patch(number) == 0:
if minor(number) == 0:
return '{maj}.{min}.{pat}'.format(
maj = major(number) - 1,
min = 9999999,
pat = 0 )
return '{maj}.{min}.{pat}'.format(
maj = major(number) - 1,
min = minor(number) - 1,
pat = 0 )
return '{maj}.{min}.{pat}'.format(
maj = major(number) - 1,
min = minor(number) - 1,
pat = patch(number) - 1 )
def major(version):
return int(version.split('.')[0])
def minor(version):
return int(version.split('.')[1])
def patch(version):
return int(version.split('.')[2])
def get_major_upgrades(top, versions):
major_top = major(top)
return [ version for version in versions if major(version) > major_top ]
def get_minor_upgrades(top, versions):
major_top = major(top)
minor_top = minor(top)
return [ version for version in versions if minor(version) > minor_top and major(version) == major_top ]
def get_patch_upgrades(top, versions):
major_top = major(top)
minor_top = minor(top)
patch_top = patch(top)
return [ version for version in versions
        if major(version) == major_top and minor_top == minor(version) and patch(version) > patch_top ]
def find_newer_versions(local_deps, remote_deps):
upgrade_suggestions = {}
for (dep, version) in local_deps.items():
if dep not in remote_deps:
continue
current_version = top_range(version)
patches = get_patch_upgrades(current_version, remote_deps[dep]['versions'])
minors = get_minor_upgrades(current_version, remote_deps[dep]['versions'])
majors = get_major_upgrades(current_version, remote_deps[dep]['versions'])
upgrade_suggestions[dep] = {
'patches': patches,
'minors': minors,
'majors': majors
}
return upgrade_suggestions
def newest_version(suggestions):
if suggestions['majors']:
return suggestions['majors'][-1]
elif suggestions['minors']:
        return suggestions['minors'][-1]
else:
return suggestions['patches'][-1]
def print_newer_versions(local_deps, remote_deps):
upgrade_suggestions = []
for (dep, suggestions) in find_newer_versions(local_deps, remote_deps).items():
patches = suggestions['patches']
minors = suggestions['minors']
majors = suggestions['majors']
if len(patches) > 0:
upgrade_suggestions.append(
'Patches available for {dep}: [{patches}]'.format(dep=dep, patches=', '.join(patches))
)
if len(minors) > 0:
upgrade_suggestions.append(
'Minors available for {dep}: [{minors}]'.format(dep=dep, minors=', '.join(minors))
)
if len(majors) > 0:
upgrade_suggestions.append(
'Majors available for {dep}: [{majors}]'.format(dep=dep, majors=', '.join(majors))
)
if not upgrade_suggestions:
print('No upgrades available')
else:
print('\n'.join(upgrade_suggestions))
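
# --- Hedged offline example (added for illustration): exercising find_newer_versions with
# --- made-up package data instead of querying package.elm-lang.org. ---
def _demo_offline():
    local = {'elm-lang/core': '5.0.0 <= v <= 5.0.0'}
    remote = {'elm-lang/core': {'versions': ['5.0.0', '5.0.1', '5.1.0', '6.0.0']}}
    # expected to report the available patch (5.0.1), minor (5.1.0) and major (6.0.0) upgrades
    print_newer_versions(local, remote)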
def main():
parser = argparse.ArgumentParser(description='Check deps file for possible upgrades')
parser.add_argument('--elm-version', help='specify your current elm version', default='0.18')
parser.add_argument('local')
args = parser.parse_args()
local = load_local_packages(args.local)
remote = load_all_packages(args.elm_version)
print_newer_versions(local, remote)
if __name__ == '__main__':
main()
|
bsd-3-clause
| 6,115,484,445,367,418,000
| 27.840491
| 108
| 0.589236
| false
| 3.71327
| false
| false
| false
|
ledtvavs/repository.ledtv
|
script.module.nanscrapers/lib/nanscrapers/scraperplugins/onlinemovies.py
|
7
|
2640
|
import re
import requests
from ..scraper import Scraper
class Onlinemovies(Scraper):
domains = ['onlinemovies.tube']
name = "onlinemovies"
sources = []
def __init__(self):
self.base_link = 'http://onlinemovies.tube/'
def scrape_movie(self, title, year, imdb, debrid = False):
try:
start_url = self.base_link+'watch/'+title.replace(' ','-')+'-'+year+'/'
html = requests.get(start_url).text
match = re.compile('<iframe.+?src="(.+?)"').findall(html)
for url in match:
if 'google' in url:
pass
elif 'youtube' in url:
pass
elif 'openload' in url:
pass
elif 'estream' in url:
self.sources.append({'source': 'estream', 'quality': 'SD', 'scraper': self.name, 'url': url,'direct': False})
elif 'clxmovies' in url:
html2 = requests.get(url).text
match2 = re.compile('{file: "(.+?)",label:"(.+?)",type: ".+?"}').findall(html2)
for url2,p in match2:
self.sources.append({'source': 'google', 'quality': p, 'scraper': self.name, 'url': url2,'direct': True})
except:
pass
return self.sources
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
if len(season) == 1:
season = '0'+str(season)
if len(episode) == 1:
episode = '0'+str(episode)
start_url = self.base_link+'episode/'+title.replace(' ','-')+'-s'+season+'e'+episode+'/'
html = requests.get(start_url).text
match = re.compile('<iframe.+?src="(.+?)"').findall(html)
for url in match:
if 'google' in url:
pass
elif 'youtube' in url:
pass
elif 'openload' in url:
pass
elif 'estream' in url:
self.sources.append({'source': 'estream', 'quality': 'SD', 'scraper': self.name, 'url': url,'direct': False})
elif 'clxmovies' in url:
html2 = requests.get(url).text
match2 = re.compile('{file: "(.+?)",label:"(.+?)",type: ".+?"}').findall(html2)
for url2,p in match2:
self.sources.append({'source': 'google', 'quality': p, 'scraper': self.name, 'url': url2,'direct': True})
return self.sources
except:
pass
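

# --- Hedged offline example (added for illustration): how the iframe pattern used above
# --- extracts embed URLs, run against a made-up HTML snippet instead of a live request. ---
def _iframe_pattern_demo():
    sample_html = '<iframe width="640" src="http://estream.example/embed/abc123"></iframe>'
    # -> ['http://estream.example/embed/abc123']
    return re.compile('<iframe.+?src="(.+?)"').findall(sample_html)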
|
gpl-3.0
| 1,984,699,316,680,473,300
| 40.25
| 129
| 0.475
| false
| 4.042879
| false
| false
| false
|
jmrodri/sm-photo-tool
|
src/cookietransport.py
|
1
|
2041
|
#
# code copied from
# http://rocketscience.itteco.org/2010/01/10/sending-cookie-via-xmlrpclib/
# by Nazar Leush
#
# changes added by jesus m. rodriguez
#
import xmlrpclib
from xmlrpclib import ProtocolError, Fault
from Cookie import _quote
class CookieTransport(xmlrpclib.Transport):
def __init__(self, cookies=None, *args, **kwargs):
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.cookies = cookies
##
# Send a complete request, and parse the response.
#
# @param host Target host.
# @param handler Target PRC handler.
# @param request_body XML-RPC request body.
# @param verbose Debugging flag.
# @return Parsed response.
def single_request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
try:
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
# Custom cookies.
self.send_cookies(h)
self.send_content(h, request_body)
response = h.getresponse(buffering=True)
if response.status == 200:
self.verbose = verbose
return self.parse_response(response)
except Fault:
raise
except Exception:
# All unexpected errors leave connection in
# a strange state, so we clear it.
self.close()
raise
#discard any response data and raise exception
if (response.getheader("content-length", 0)):
response.read()
raise ProtocolError(
host + handler,
response.status, response.reason,
response.msg,
)
def send_cookies(self, connection):
if self.cookies:
for k, v in self.cookies.iteritems():
connection.putheader(
"Cookie", ";".join(["%s=%s" % (k, _quote(v))]))
|
gpl-2.0
| -4,333,939,322,342,350,000
| 27.746479
| 74
| 0.579128
| false
| 4.139959
| false
| false
| false
|
Reddone/CarIncidentJupyter
|
main.py
|
1
|
1735
|
import os
import pandas as pd
import utils
# Prepare the dataset for the analysis
sem_path = r"0_Sem_2014"
sem1_path = r"1_Sem_2014.csv"
sem2_path = r"2_Sem_2014.csv"
if not os.path.isfile(sem_path):
utils.join_dataframes(sem_path, sem1_path, sem2_path)
dataset = pd.read_pickle(sem_path)
# Assign correct values to columns
dataset = utils.assign_columns(dataset)
# Remove columns we don't need
dataset = utils.remove_columns(dataset)
# Fix broken values
dataset = utils.fix_columns(dataset)
# Create new features using DataOraIncidente column
dataset = utils.expand_DataOraIncidente(dataset)
# Create new features using NUM columns
dataset = utils.expand_NUM(dataset)
# Create new features using DecedutoDopo column
dataset = utils.expand_DecedutoDopo(dataset)
# Adjust NaturaIncidente column
dataset = utils.adjust_NaturaIncidente(dataset)
# Adjust ParticolaritaStrade column
dataset = utils.adjust_ParticolaritaStrade(dataset)
# Adjust FondoStradale column
dataset = utils.adjust_FondoStradale(dataset)
# Adjust Pavimentazione column
dataset = utils.adjust_Pavimentazione(dataset)
# Adjust CondizioneAtmosferica column
dataset = utils.adjust_CondizioneAtmosferica(dataset)
# Adjust Traffico column
dataset = utils.adjust_Traffico(dataset)
# Adjust TipoVeicolo column
dataset = utils.adjust_TipoVeicolo(dataset)
# Adjust TipoPersona column
dataset = utils.adjust_TipoPersona(dataset)
# Adjust AnnoNascita column
dataset = utils.adjust_AnnoNascita(dataset)
# Adjust Sesso column
dataset = utils.adjust_Sesso(dataset)
# Adjust TipoLesione column
dataset = utils.adjust_TipoLesione(dataset)
save_path = r"0_CarIncident_2014"
save_path_csv = r"0_CarIncident_2014.csv"
dataset.to_pickle(save_path)
dataset.to_csv(save_path_csv)
|
mit
| 8,604,081,585,900,436,000
| 31.12963
| 57
| 0.796542
| false
| 3.006932
| false
| false
| false
|
thinmanj/clean-bj
|
zbj.py
|
1
|
5351
|
import random
class Card(object):
SUITS = ('C', 'S', 'H', 'D')
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
def __init__(self, suit, rank):
if (suit in self.SUITS) and (rank in self.VALUES.keys()):
self.suit = suit
self.rank = rank
self.value = self.VALUES[rank]
else:
self.suit = None
self.rank = None
self.value = 0
print "Invalid card: ", suit, rank
def __str__(self):
return "%s(%s)" % (self.rank, self.suit, )
def get_suit(self):
return self.suit
def get_rank(self):
return self.rank
def get_value(self):
return self.value
class Hand(object):
def __init__(self):
self.clear()
def __len__(self):
return len(self.hand)
def __str__(self):
return ', '.join([str(card) for card in self.hand])
def add_card(self, card):
self.hand.append(card)
if card.get_rank() == 'A':
self.ace = True
self.value += card.get_value()
if self.value > 21:
self.value = 0
self.ace = 0
raise Exception("Busted!")
def get_value(self):
if self.ace and self.value <= 10:
return self.value + 10
else:
return self.value
def hit(self, deck):
card = deck.deal_card()
print str(card)
self.add_card(card)
def busted(self):
if self.get_value() > 21:
return True
def clear(self):
self.hand = []
self.ace = False
self.value = 0
class Deck(object):
def __init__(self):
self.clear()
def __len__(self):
return len(self.deck)
def __str__(self):
return ', '.join([str(card) for card in self.deck])
def shuffle(self):
random.shuffle(self.deck)
def deal_card(self):
return self.deck.pop()
def clear(self):
self.deck = [Card(s, r) for s in Card.SUITS for r in Card.VALUES.keys()]
self.shuffle()
class Game(object):
def __init__(self):
self.in_play = True
self.player_chips = 100
self.game_chips = 0
self.deal()
def __str__(self):
if not self.in_play:
return ""
return """
Game:
Game chips: %d
Dealer hand: %s
Player hand: %s
Player chips: %s
""" % (self.game_chips, str(self.dealer), str(self.player), self.player_chips)
def won(self):
print "You won!"
self.player_chips += 2 * self.game_chips
self.game_chips = 0
self.in_play = False
def lost(self):
print "You lost!"
self.game_chips = 0
self.in_play = False
def tie(self):
print "It's a tie!"
self.player_chips += self.game_chips
self.game_chips = 0
self.in_play = False
def deal(self, chips=1):
if self.player_chips <= 0 or chips > self.player_chips:
raise Exception("No enough chips.")
self.in_play = True
self.deck = Deck()
self.dealer = Hand()
self.player = Hand()
self.player_chips -= chips
self.game_chips = chips
for x in range(2):
self.dealer.hit(self.deck)
self.player.hit(self.deck)
def hit(self):
if not self.in_play:
return
try:
self.player.hit(self.deck)
except Exception:
self.in_play = False
print "You went bust!"
self.lost()
raise Exception("Lost!")
if self.player.get_value() == 21:
print "Black Jack!"
self.won()
raise Exception("Won!")
def stand(self):
if not self.in_play:
return
while self.in_play and self.dealer.get_value() < 17:
try:
self.dealer.hit(self.deck)
except Exception:
print "Dealer Busted!"
self.in_play = False
if self.player.get_value() > self.dealer.get_value():
self.won()
elif self.player.get_value() < self.dealer.get_value():
self.lost()
else:
self.tie()
def play():
game = Game()
try:
while True:
try:
while True:
print str(game)
selection = raw_input("Do you whan to (h)it or to (s)tand: ")
if selection == "h":
game.hit()
elif selection == "s":
game.stand()
raise Exception
else:
print "Wrong selection..."
except:
selection_flag = True
while selection_flag:
selection = raw_input("New (d)eal? or (e)xit: ")
if selection == "d":
game.deal(1)
selection_flag = False
elif selection == "e":
raise Exception
else:
print "Wrong selection..."
except:
print "See you next time"
if __name__ == "__main__":
play()
|
apache-2.0
| -2,957,766,947,423,949,300
| 23.888372
| 108
| 0.470566
| false
| 3.76301
| false
| false
| false
|
diegojromerolopez/djanban
|
src/djanban/apps/members/migrations/0001_initial.py
|
1
|
1626
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-09 18:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('api_key', models.CharField(default=None, max_length=128, null=True, verbose_name='Trello API key')),
('api_secret', models.CharField(default=None, max_length=128, null=True, verbose_name='Trello API secret')),
('token', models.CharField(default=None, max_length=128, null=True, verbose_name='Trello token')),
('token_secret', models.CharField(default=None, max_length=128, null=True, verbose_name='Trello token secret')),
('uuid', models.CharField(max_length=128, unique=True, verbose_name='Trello member uuid')),
('trello_username', models.CharField(max_length=128, verbose_name='Trello username')),
('initials', models.CharField(max_length=8, verbose_name='User initials in Trello')),
('user', models.OneToOneField(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='member', to=settings.AUTH_USER_MODEL, verbose_name='Associated user')),
],
),
]
|
mit
| 8,655,522,309,848,991,000
| 48.272727
| 201
| 0.643911
| false
| 3.880668
| false
| false
| false
|
neviim/forza
|
getarq.py
|
1
|
2158
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import csv
import json
import pymongo
from pymongo import MongoClient
# mongodb
def get_db():
client = MongoClient('localhost:27017')
db = client.forza6db
return db
def add_dados(db, data):
db.countries.insert(data)
def get_country(db, colecao):
return db.get_collection(colecao).find({}).count()
# ---
# read and print a csv file
def gera_csv(localFilePath):
    # check that the file exists and has a .csv extension
if (os.path.isfile(localFilePath) and localFilePath.endswith(".csv")):
        # read the csv file
with open(localFilePath, 'r', encoding='utf-8') as csvfile:
#sniff
fileDialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
            # create a CSV reader
myReader = csv.reader(csvfile, dialect=fileDialect)
for row in myReader:
print(row)
# generate the json data
def gera_json(localFilePath):
if (os.path.isfile(localFilePath) and localFilePath.endswith(".csv")):
        # open the forza database
db = get_db()
        # generate the json data
with open(localFilePath, 'r', encoding='utf-8') as csvfile:
            # sniff to detect the csv format
fileDialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
            # read the CSV file from the directory.
dictReader = csv.DictReader(csvfile, dialect=fileDialect)
for row in dictReader:
                # for the cars collection
if get_country(db, 'carros') == 0:
db.carros.insert(row)
#print(row)
return
# read the files
def leArquivos(filePath):
#get all files in the given folder
fileListing = os.listdir(filePath)
for myFile in fileListing:
        # get the file path
localFilePath = os.path.join(filePath, myFile)
gera_json(localFilePath)
return
# initialize...
if __name__ == '__main__':
currentPath = os.path.dirname(__file__)
filePath = os.path.abspath(os.path.join(currentPath, os.pardir,os.pardir,'_github/forza/csv'))
leArquivos(filePath)
|
mit
| 1,924,125,129,973,735,400
| 24.666667
| 98
| 0.610853
| false
| 3.416799
| false
| false
| false
|
SambaDemon/python_vantiv
|
vantiv/request/model/identification.py
|
1
|
1178
|
from ..schemas import Schema, fields
from ..utils import FrozenMixin
from ..enums import (EnumField, CustomerTypeEnum, CurrencyEnum,
ResidenceStatusEnum)
class IdentificationSchema(Schema):
Ssn = fields.String()
BirthDate = fields.Date()
CustomerRegistrationDate = fields.Date()
IncomeAmount = fields.Decimal()
CustomerCheckingAccount = fields.String()
CustomerSavingsAccount = fields.String()
EmployerName = fields.String()
CustomerWorkTelephone = fields.String()
YearsAtResidence = fields.String()
YearsAtEmployer = fields.String()
CustomerType = EnumField(CustomerTypeEnum, by_value=True)
IncomeCurrency = EnumField(CurrencyEnum, by_value=True)
ResidenceStatus = EnumField(ResidenceStatusEnum, by_value=True)
class Identification(FrozenMixin):
Ssn = None
BirthDate = None
CustomerRegistrationDate = None
IncomeAmount = None
CustomerCheckingAccount = None
CustomerSavingsAccount = None
EmployerName = None
CustomerWorkTelephone = None
YearsAtResidence = None
YearsAtEmployer = None
CustomerType = None
IncomeCurrency = None
ResidenceStatus = None
|
mit
| 5,504,557,734,375,287,000
| 31.722222
| 67
| 0.729202
| false
| 3.887789
| false
| false
| false
|
Fantomas42/veliberator
|
veliberator/xml_wrappers.py
|
1
|
1279
|
"""Xml functions for helping in convertion of data"""
def xml_station_status_wrapper(xmlnode):
"""Convert Station status xml
to a usable dict"""
def node_value(name):
return xmlnode.getElementsByTagName(name)[0].childNodes[0].data
return {'total': int(node_value('total')),
'available': int(node_value('available')),
'free': int(node_value('free')),
'ticket': int(node_value('ticket')) == 1}
def xml_station_information_wrapper(xmlnode):
"""Convert Station information xml
to a usable dict"""
city = ''
postal_code = ''
address = xmlnode.getAttribute('address')[:-1].strip()
address_parts = xmlnode.getAttribute('fullAddress').split()
for p in address_parts:
if len(p) == 5 and p.isdigit():
postal_code = p
city = ' '.join(address_parts[address_parts.index(p) + 1:])
break
return {'id': int(xmlnode.getAttribute('number')),
'address': address,
'postal_code': postal_code,
'city': city,
'lat': xmlnode.getAttribute('lat'),
'lng': xmlnode.getAttribute('lng'),
'opened': xmlnode.getAttribute('open') == '1',
'bonus': xmlnode.getAttribute('bonus') == '1'}
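# Illustrative usage sketch, assuming a Velib-style status payload: only the
# tag names below match what the wrapper expects, the values are invented.
if __name__ == '__main__':
    from xml.dom.minidom import parseString
    sample = ('<station><total>20</total><available>5</available>'
              '<free>15</free><ticket>1</ticket></station>')
    node = parseString(sample).documentElement
    # {'total': 20, 'available': 5, 'free': 15, 'ticket': True}
    print(xml_station_status_wrapper(node))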
|
bsd-3-clause
| 3,755,493,492,201,253,000
| 33.567568
| 71
| 0.577795
| false
| 3.911315
| false
| false
| false
|
tlemoult/spectroDb
|
tools/clean-travail-ISIS.py
|
1
|
1036
|
import sys,os
import urllib.request, urllib.parse, urllib.error,glob
import astropy.io.fits as fits
import zipfile
import shutil
def listdirectory(path):
fichier=[]
l = glob.glob(path+'\\*')
for i in l:
if os.path.isdir(i): fichier.extend(listdirectory(i))
else: fichier.append(i)
return fichier
def listdirectory2(path):
a=[]
l = glob.glob(path+'\\*')
for i in l:
if os.path.isdir(i):
f=listdirectory(i)
a.append(f)
return a
print("Script de Nettoyage des dossiers de travail ISIS")
BasePath=sys.path[0]
dbSourcePath=BasePath
PathWeb=BasePath
dirList= os.listdir(dbSourcePath)
dirList=sorted(dirList)
for path in listdirectory(BasePath):
file=os.path.basename(path)
if (file.startswith('blaze_') or file.startswith('calib_') or file.startswith('flat_') or file.startswith('#') or (file.endswith('.dat') and not file.startswith('reponse')) or (file.startswith('@') and not file.startswith('@pro')) ):
print(('remove:'+path))
os.remove(path)
|
mit
| 5,727,460,823,284,896,000
| 26.263158
| 234
| 0.671815
| false
| 3.129909
| false
| false
| false
|
nickgu/pydev
|
py3dev.py
|
1
|
5350
|
#! /bin/env python3
# encoding=utf-8
# author: nickgu
#
# Compatible with python3
#
import sys
import logging
import argparse
class ColorString:
TC_NONE ="\033[m"
TC_RED ="\033[0;32;31m"
TC_LIGHT_RED ="\033[1;31m"
TC_GREEN ="\033[0;32;32m"
TC_LIGHT_GREEN ="\033[1;32m"
TC_BLUE ="\033[0;32;34m"
TC_LIGHT_BLUE ="\033[1;34m"
TC_DARY_GRAY ="\033[1;30m"
TC_CYAN ="\033[0;36m"
TC_LIGHT_CYAN ="\033[1;36m"
TC_PURPLE ="\033[0;35m"
TC_LIGHT_PURPLE ="\033[1;35m"
TC_BROWN ="\033[0;33m"
TC_YELLOW ="\033[1;33m"
TC_LIGHT_GRAY ="\033[0;37m"
TC_WHITE ="\033[1;37m"
def __init__(self):
pass
@staticmethod
def colors(s, color):
return color + s + ColorString.TC_NONE
@staticmethod
def red(s): return ColorString.colors(s, ColorString.TC_RED)
@staticmethod
def yellow(s): return ColorString.colors(s, ColorString.TC_YELLOW)
@staticmethod
def green(s): return ColorString.colors(s, ColorString.TC_GREEN)
@staticmethod
def blue(s): return ColorString.colors(s, ColorString.TC_BLUE)
@staticmethod
def cyan(s): return ColorString.colors(s, ColorString.TC_CYAN)
def error(*args, on_screen=True):
if on_screen:
        tag = ColorString.yellow('[ERROR] ')
else:
tag = '[ERROR] '
print(tag, *args, file=sys.stderr)
def info(*args):
tag = '[INFO] '
print(tag, *args, file=sys.stderr)
class Arg(object):
'''
Sample code:
ag=Arg()
ag.str_opt('f', 'file', 'this arg is for file')
opt = ag.init_arg()
# todo with opt, such as opt.file
'''
def __init__(self, help='Lazy guy, no help'):
self.is_parsed = False;
#help = help.decode('utf-8').encode('gb18030')
self.__parser = argparse.ArgumentParser(description=help)
self.__args = None;
# -l --log
self.str_opt('log', 'l', 'logging level default=[error]', meta='[debug|info|error]');
def __default_tip(self, default_value=None):
if default_value==None:
return ''
return ' default=[%s]'%default_value
def bool_opt(self, name, iname, help=''):
#help = help.decode('utf-8').encode('gb18030')
self.__parser.add_argument(
'-'+iname,
'--'+name,
action='store_const',
const=1,
default=0,
help=help);
return
def str_opt(self, name, iname, help='', default=None, meta=None):
help = (help + self.__default_tip(default))#.decode('utf-8').encode('gb18030')
self.__parser.add_argument(
'-'+iname,
'--'+name,
metavar=meta,
help=help,
default=default);
pass
def var_opt(self, name, meta='', help='', default=None):
        help = help + self.__default_tip(default)
if meta=='':
meta=name
self.__parser.add_argument(name,
metavar=meta,
help=help,
default=default)
pass
def init_arg(self, input_args=None):
if not self.is_parsed:
if input_args is not None:
self.__args = self.__parser.parse_args(input_args)
else:
self.__args = self.__parser.parse_args()
self.is_parsed = True;
if self.__args.log:
format='%(asctime)s %(levelname)8s [%(filename)18s:%(lineno)04d]: %(message)s'
if self.__args.log=='debug':
logging.basicConfig(level=logging.DEBUG, format=format)
logging.debug('log level set to [%s]'%(self.__args.log));
elif self.__args.log=='info':
logging.basicConfig(level=logging.INFO, format=format)
logging.info('log level set to [%s]'%(self.__args.log));
elif self.__args.log=='error':
logging.basicConfig(level=logging.ERROR, format=format)
logging.info('log level set to [%s]'%(self.__args.log));
else:
logging.error('log mode invalid! [%s]'%self.__args.log)
return self.__args
@property
def args(self):
if not self.is_parsed:
self.__args = self.__parser.parse_args()
self.is_parsed = True;
return self.__args;
def dp_to_generate_answer_range(data):
'''
data shape: (batch, clen, 2),
last dim indicates start/end prob.
'''
ans = []
l = data.shape[1]
data = data.cpu().numpy()
dp = [0.] * (l+1)
dp_sidx = [-1] * (l+1)
for b in data:
max_prob = 0
max_range = (0, 0)
dp[0] = 0
dp_sidx[0] = -1
for idx in range(l):
sp, ep = b[idx]
cur_end_prob = dp[idx] * ep
if cur_end_prob > max_prob:
max_prob = cur_end_prob
max_range = (dp_sidx[idx], idx)
if sp>dp[idx]:
dp[idx+1] = sp
dp_sidx[idx+1] = idx
else:
dp[idx+1] = dp[idx]
dp_sidx[idx+1] = dp_sidx[idx]
ans.append(max_range)
return ans
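# Illustrative usage sketch for dp_to_generate_answer_range: it expects a
# (batch, clen, 2) tensor of start/end probabilities; torch is assumed here
# purely for illustration and is imported lazily so the module still loads.
def _demo_dp_answer_range():
    import torch
    probs = torch.tensor([[[0.9, 0.1], [0.2, 0.8], [0.1, 0.1]]])
    # the most probable (start, end) span for this toy batch is (0, 1)
    return dp_to_generate_answer_range(probs)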
if __name__=='__main__':
pass
|
mit
| 2,808,130,532,025,652,700
| 28.558011
| 93
| 0.509907
| false
| 3.360553
| false
| false
| false
|
AloneRoad/Inforlearn
|
common/component.py
|
1
|
1678
|
import logging
import os.path
from django.conf import settings
# public variable with the intent of referencing it in templates
# and allowing tests to easily adjust the values
loaded = {}
def install_components():
global loaded
root_dir = os.path.dirname(os.path.dirname(__file__))
component_dir = os.path.join(root_dir, 'components')
possibles = os.listdir(component_dir)
logging.info("Trying to load components in %s...", possibles)
for p in possibles:
# verify that we haven't manually disabled this in settings
is_enabled = getattr(settings, 'COMPONENT_%s_DISABLED' % (p.upper()), True)
if not is_enabled:
continue
path = os.path.join(component_dir, p)
if not os.path.isdir(path):
logging.debug("Not a dir %s", p)
continue
try:
loaded[p] = __import__('components.%s' % p, {}, {}, p)
logging.debug('Loaded component: %s', p)
except ValueError:
# bad module name, things like .svn and whatnot trigger this
continue
except ImportError:
import traceback
logging.debug('Exception loading component: %s', traceback.format_exc())
continue
def include(*names):
global loaded
for name in names:
rv = loaded.get(name)
if rv:
return rv
return rv
def require(*names):
mod = include(*names)
if not mod:
raise Exception("Ultimate doom")
return mod
class LoadedOrDummy(object):
def __getitem__(self, key):
rv = include(key, "dummy_%s" % key)
if not rv:
raise KeyError(key)
return rv
def __contains__(self, key):
rv = include(key, "dummy_%s" % key)
if rv:
return True
return False
best = LoadedOrDummy()
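# Illustrative call order (sketch only; the component name 'api' is made up):
#   install_components()               # import everything under components/
#   mod = include('api', 'dummy_api')  # first loaded module, or None
#   mod = require('api', 'dummy_api')  # same, but raises if nothing matched
#   mod = best['api']                  # dict-style access with dummy fallback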
|
apache-2.0
| -5,935,113,229,335,788,000
| 24.424242
| 79
| 0.652563
| false
| 3.712389
| false
| false
| false
|
Noahs-ARK/ARKcat
|
src/tokenizer.py
|
1
|
3104
|
import re
import nltk.data
from nltk.stem import WordNetLemmatizer
def split_sentences(text, decorate=False):
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sentences = sent_tokenizer.sentences_from_text(text, realign_boundaries=True)
if decorate:
sentences = [sent + ' <SE>' for sent in sentences]
return sentences
def split_into_words(text, lemmatize=False, reattach=True, replace_numbers=True, split_off_quotes=True,
fix_semicolon_mistakes=True):
if fix_semicolon_mistakes:
text = fix_semicolons(text)
word_tokenizer = nltk.tokenize.TreebankWordTokenizer()
    # get rid of certain characters so that we can use those for special purposes
tokens = word_tokenizer.tokenize(text)
if reattach:
tokens = reattach_clitics(tokens)
if split_off_quotes:
tokens = split_off_quote_marks(tokens)
if lemmatize:
lemmatizer = WordNetLemmatizer()
tokens = [lemmatizer.lemmatize(token) for token in tokens]
if replace_numbers:
pattern = '^[0-9]+$'
tokens = [t if re.search(pattern, t) is None else '__NUM__' for t in tokens]
tokens = split_tokens(tokens, '.,')
return tokens
def reattach_clitics(tokens):
#clitic_pattern = '^\'(s|S|d|D|ve|VE|t|T|m|M|re|RE|ll|LL)'
#clitic_pattern = "^(n't|'ll|'re|'ve)"
clitic_pattern = "^((n't)|('s)|('m)|('re)|('ve)|('ll)|('d)|('l)|('t))$"
pop_list = []
# append clitics to previous words
for i in range(1, len(tokens)):
if re.search(clitic_pattern, tokens[i]):
tokens[i-1] += tokens[i]
if i not in pop_list:
pop_list.append(i)
pop_list.sort()
pop_list.reverse()
for i in pop_list:
tokens.pop(i)
return tokens
def split_off_quote_marks(tokens):
i = 0
pattern1 = r"^('+)(.+)"
while i < len(tokens):
token = tokens[i]
match = re.search(pattern1, token)
if match is not None:
tokens[i] = match.group(1)
tokens.insert(i+1, match.group(2))
i += 1
return tokens
def fix_semicolons(text):
pattern = "([a-z]+;(t|s|m))[^a-z]"
match = re.search(pattern, text)
if match is not None:
repl = re.sub(';', "'", match.group(1))
text = re.sub(match.group(1), repl, text)
return text
def make_ngrams(text, n, lemmatize=False, reattach=True, replace_numbers=True, split_off_quotes=True):
tokens = split_into_words(text, lemmatize=lemmatize, reattach=reattach, replace_numbers=replace_numbers,
split_off_quotes=split_off_quotes)
if n > 1:
N = len(tokens)
grams = [tokens[k:N-(n-1-k)] for k in range(n)]
tokens = map(u'_'.join, zip(*grams))
return tokens
def split_tokens(tokens, delimiters):
# split on and keep periods
tokens = [re.split('([' + delimiters + '])', token) for token in tokens]
# flatten
tokens = [token for sublist in tokens for token in sublist]
tokens = [token for token in tokens if token != '']
return tokens
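# Illustrative sketch of the sliding-window trick used by make_ngrams, shown on
# a plain token list so it runs without the NLTK models the functions above use.
def _demo_bigrams(tokens=('the', 'quick', 'brown', 'fox'), n=2):
    N = len(tokens)
    grams = [tokens[k:N - (n - 1 - k)] for k in range(n)]
    # ['the_quick', 'quick_brown', 'brown_fox']
    return list(map(u'_'.join, zip(*grams)))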
|
apache-2.0
| -1,482,344,033,208,116,700
| 28.846154
| 108
| 0.607281
| false
| 3.277719
| false
| false
| false
|
evrenesat/genesis
|
genesis/lab/admin.py
|
1
|
26626
|
from datetime import datetime
# import dbsettings
from functools import partial
from uuid import uuid4
from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.dispatch import receiver
from django.forms import BaseInlineFormSet
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
import django.dispatch
# Register your models here.
from django.apps import apps
from django.contrib import admin
from django.contrib.admin.sites import AlreadyRegistered
from django_ace import AceWidget
# from grappelli_autocomplete_fk_edit_link import AutocompleteEditLinkAdminMixin
from lab.utils import tlower, tupper
from .models import *
from com.models import *
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Permission
admin.site.register(Permission)
UserAdmin.add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2', 'first_name', 'last_name')}
),
)
# admin.ModelAdmin.change_list_template = "admin/change_list_filter_sidebar.html"
def finish_selected_value(modeladmin, request, queryset):
for value_item in queryset:
value_item.analyse.mark_finished(request, True)
finish_selected_value.short_description = _("Mark as Finished")
def approve_selected_value(modeladmin, request, queryset):
for value_item in queryset:
value_item.analyse.mark_approved(request, True)
approve_selected_value.short_description = _("Mark as Approved")
def finish_selected(modeladmin, request, queryset):
for analyse in queryset:
analyse.mark_finished(request, True)
finish_selected.short_description = _("Mark as Finished")
def accept_selected(modeladmin, request, queryset):
for analyse in queryset:
analyse.mark_accepted(request, True)
accept_selected.short_description = _("Mark as Accepted")
def approve_selected(modeladmin, request, queryset):
for analyse in queryset:
analyse.mark_approved(request, True)
approve_selected.short_description = _("Mark as Approved")
@admin.register(ParameterValue)
class AdminParameterValue(admin.ModelAdmin):
list_editable = ('value',)
actions = [finish_selected_value, approve_selected_value]
list_display = (
'code', 'patient_name', 'analyse_name', 'key', 'value', 'analyse_state', 'keyid')
# search_fields = ('analyse__group_relation', 'analyse__type__name', 'analyse__admission__id')
search_fields = ('analyse__group_relation', )
def get_actions(self, request):
actions = super().get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
# def get_search_results(self, request, queryset, search_term):
# # integer search_term means we want to list values of a certain admission
# try:
# search_term_as_int = int(search_term)
# return ParameterValue.objects.filter(analyse__admission=search_term_as_int), False
# except ValueError:
# return super().get_search_results(request, queryset, search_term)
def message_user(self, *args, **kwargs):
super().message_user(*args, **kwargs)
# this is a pure hack!
# we are harnessing the fact that message_user will be called
# for once after all objects are saved
if hasattr(self, 'updated_analysis'):
for analyse in self.updated_analysis[0].admission.analyse_set.all():
analyse.save_result()
def log_change(self, request, object, message):
# by overriding log_change we can catch the changed objects
# and accumulate their analyse ids
if request.method == "POST" and '_save' in request.POST:
            if not hasattr(self, 'updated_analysis'):
self.updated_analysis = []
self.updated_analysis.append(object.analyse)
super().log_change(request, object, message)
def get_form(self, request, obj=None, **kwargs):
kwargs['formfield_callback'] = partial(self.formfield_for_dbfield, request=request, obj=obj)
return super().get_form(request, obj, **kwargs)
def get_formset(self, request, obj=None, **kwargs):
kwargs['formfield_callback'] = partial(self.formfield_for_dbfield, request=request, obj=obj)
return super().get_formset(request, obj, **kwargs)
def formfield_for_dbfield(self, db_field, **kwargs):
p_value = kwargs.pop('obj', None)
if p_value and db_field.name == "value" and p_value.key.presets:
db_field.choices = p_value.key.preset_choices()
return super().formfield_for_dbfield(db_field, **kwargs)
# def formfield_for_choice_field(self, db_field, request=None, **kwargs):
# if db_field.name == "value":
# kwargs['choices'] = (
# ('accepted', 'Accepted'),
# ('denied', 'Denied'),
# )
# return super().formfield_for_choice_field(db_field, request, **kwargs)
class ParameterValueInline(admin.TabularInline):
classes = ('grp-collapse grp-closed analyse_box result_parameters',)
model = ParameterValue
extra = 0
ordering = ('code',)
readonly_fields = ('key', 'keydata')
max_num = 0
fields = ('key', 'value', 'keydata')
def has_add_permission(self, request, obj=None):
return False
def keydata(self, obj):
return obj.keyid()
keydata.allow_tags = True
class ParameterKeyInline(admin.TabularInline):
model = ParameterKey
extra = 0
classes = ('grp-collapse grp-closed',)
class AdmissionSampleInline(admin.TabularInline):
model = AdmissionSample
extra = 1
classes = ('grp-collapse',)
class ParameterInline(admin.TabularInline):
model = Parameter.analyze_type.through
extra = 0
classes = ('grp-collapse',) # grp-closed
class InstitutionAnalyseInline(admin.TabularInline):
model = InstitutionAnalyse
extra = 0
classes = ('grp-collapse',) # grp-closed
class ProcessLogicForm(forms.ModelForm):
class Meta:
model = ProcessLogic
widgets = {
'code': AceWidget(mode='python', theme='twilight', width="900px", height="700px"),
}
fields = '__all__'
@admin.register(ProcessLogic)
class AdminProcessLogic(admin.ModelAdmin):
form = ProcessLogicForm
@admin.register(AnalyseType)
class AdminAnalyseType(admin.ModelAdmin):
list_filter = ('group_type', 'category',)
search_fields = ('name',)
list_display = (
'name', 'code', 'group_type', 'category', 'method', 'price', 'external', 'order')
list_editable = ('category', 'method', 'price', 'code', 'order')
filter_horizontal = ('subtypes',)
readonly_fields = ('group_type',)
fieldsets = (
(None, {
'fields': (('name', 'code','group_type',), ('sample_type', 'category', 'method'),
'process_time', 'footnote','barcode_count',
('price', 'alternative_price', 'no_of_groups'),
('external_lab', 'external_price'),)
}),
(_('Advanced'),
{'classes': ('grp-collapse', 'grp-closed'),
'fields': ('subtypes', 'process_logic',)
})
)
inlines = [ParameterInline, InstitutionAnalyseInline]
class Media:
js = [
'/static/tinymce/tinymce.min.js',
'/static/tinymce/setup.js',
]
def get_search_results(self, request, queryset, search_term):
# integer search_term means we want to list values of a certain admission
return queryset.filter(Q(name__contains=tupper(search_term))|Q(name__contains=tlower(search_term))), False
def save_related(self, request, form, formsets, change):
super().save_related(request, form, formsets, change)
if form.instance.subtypes.exists():
if not form.instance.group_type:
form.instance.group_type = True
form.instance.save()
else:
if form.instance.group_type:
form.instance.group_type = False
form.instance.save()
@admin.register(StateDefinition)
class AdminStateDefinition(admin.ModelAdmin):
list_filter = ('type',)
search_fields = ('name',)
filter_horizontal = ('type',)
@admin.register(State)
class AdminState(admin.ModelAdmin):
list_filter = (
'definition', 'group', 'sample_type', 'analyse__type', 'analyse__type__category',
'timestamp',
'current_state')
list_display = (
'definition', 'comment', 'sample_type', 'analyse_info', 'timestamp', 'current_state',
'group',
'tdt')
search_fields = ('definition__name', 'comment')
date_hierarchy = 'timestamp'
change_list_template = "admin/change_list_filter_sidebar.html"
def analyse_info(self, obj):
return "%s %s" % (obj.analyse.type.name, obj.analyse.admission.patient.full_name(15))
def tdt(self, obj):
return str(int(obj.timestamp.timestamp()))
@admin.register(Parameter)
class AdminParameter(admin.ModelAdmin):
# list_filter = (,)
# search_fields = (,)
def save_model(self, request, obj, form, change):
obj.save()
if obj.parameter_definition.strip():
obj.create_update_parameter_keys()
filter_horizontal = ('analyze_type',)
inlines = (ParameterKeyInline,)
fieldsets = (
(None, {
'fields': ('name', 'process_logic', 'analyze_type')
}),
(_('Quick parameter definition'), {
'classes': ('grp-collapse',), # grp-closed
'fields': ('parameter_definition',),
}),
)
class StateFormSet(BaseInlineFormSet):
def save_new(self, form, commit=True):
obj = super().save_new(form, commit=False)
if not obj.personnel_id:
obj.personnel = self.request.user.profile
# if obj.personnel != form._request.user.profile:
# here you can add anything you need from the request
if obj.definition.finish:
obj.analyse.mark_finished(self.request)
if obj.definition.accept:
obj.analyse.mark_accepted(self.request)
if obj.definition.approve:
obj.analyse.mark_approved(self.request)
if commit:
obj.save()
return obj
# def clean(self):
# super().clean()
# for form in self.forms:
# if not hasattr(form, 'cleaned_data'):
# continue
# if form.cleaned_data.get('DELETE'):
# raise ValidationError('Error')
class StateInline(admin.TabularInline):
model = State
extra = 1
can_delete = False
formset = StateFormSet
classes = ('grp-collapse analyse_box analyse_states',)
radio_fields = {"group": admin.VERTICAL}
fields = ('current_state', 'group', 'definition', 'comment', 'timestamp', 'personnel')
readonly_fields = ('current_state', 'timestamp', 'personnel')
ordering = ("-timestamp",)
def get_formset(self, request, obj=None, **kwargs):
formset = super().get_formset(request, obj, **kwargs)
formset.request = request
return formset
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'comment':
kwargs['widget'] = forms.Textarea()
return super().formfield_for_dbfield(db_field, **kwargs)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
field = super().formfield_for_foreignkey(db_field, request, **kwargs)
if db_field.name == 'definition':
if request._obj_ is not None:
field.queryset = field.queryset.filter(id__in=request._obj_.applicable_states_ids())
else:
field.queryset = field.queryset.none()
return field
class AnalyseAdminForm(forms.ModelForm):
class Meta:
model = Analyse
widgets = {
'group_relation': forms.HiddenInput()
}
fields = '__all__'
@admin.register(Patient)
class AdminPatient(admin.ModelAdmin):
list_display = ("name", 'surname', 'tcno', 'birthdate', 'timestamp')
date_hierarchy = 'timestamp'
# list_filter = []
search_fields = ('name', 'surname', 'tcno')
@admin.register(Analyse)
class AdminAnalyse(admin.ModelAdmin):
form = AnalyseAdminForm
raw_id_fields = ("type", 'admission')
actions = [finish_selected, approve_selected]
date_hierarchy = 'timestamp'
search_fields = ('admission__id', 'type__name', 'admission__patient__name',
'admission__patient__tcno', 'admission__patient__surname')
readonly_fields = ('id', 'approver', 'approved', 'approve_time', 'finished', 'analyser',
'completion_time', 'doctor_institution', 'patient', 'analyse_type',
'result_json')
autocomplete_lookup_fields = {
'fk': ['type', 'admission'],
}
fieldsets = (
(_('Admission Information'),
{'classes': ('grp-collapse analyse_box admission_info',),
'fields': (('analyse_type', 'doctor_institution', 'patient'),
('sample_type', 'sample_amount', 'sample_unit'),
('no_of_groups', 'medium_amount', 'medium_type', 'group_relation')
)
},
),
("State Inline", {"classes": ("placeholder state_set-group",), "fields": ()}),
("Result Inline", {"classes": ("placeholder parametervalue_set-group",), "fields": ()}),
(_('Analyse Result'),
{'classes': ('grp-collapse', 'grp-closed', 'analyse_box', 'analyse_result'),
'fields': (('short_result', 'comment'),
('finished', 'analyser', 'completion_time'),
('approved', 'approver', 'approve_time'),
)}),
(_('Advanced'),
{'classes': ('grp-collapse', 'grp-closed', 'analyse_box advanced_details'),
'fields': (
'report_override',
'result', 'result_json', 'template', 'admission', 'type',
'external_lab')
},
))
list_filter = ('finished', 'timestamp', 'type')
list_display = ('id', 'type', 'admission', 'timestamp', 'finished', 'approved')
list_display_links = ('id', 'type')
inlines = [
StateInline, ParameterValueInline
]
def get_search_results(self, request, queryset, search_term):
# integer search_term means we want to list values of a certain record
try:
search_term_as_int = int(search_term)
return Analyse.objects.filter(pk=search_term_as_int), False
except ValueError:
if len(search_term) == 32 and ' ' not in search_term:
# checking if the search term is a hash or not,
# a weak solution but should work for most cases
return Analyse.objects.filter(group_relation=search_term), False
return super().get_search_results(request, queryset, search_term)
def get_actions(self, request):
actions = super().get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def doctor_institution(self, obj):
adm = obj.admission
return '%s / %s' % (adm.institution.name, adm.doctor.full_name() if adm.doctor else '')
doctor_institution.short_description = _("Institution / Doctor")
def patient(self, obj):
return '<a href="/admin/lab/admission/%s/">%s - %s</a>' % (obj.admission.id,
obj.admission.patient.full_name(
30),
obj.admission.timestamp)
patient.short_description = _("Patient info")
patient.allow_tags = True
def analyse_type(self, obj):
external = ' | %s:%s' % (_('Ext.Lab'), obj.external_lab) if obj.external else ''
return '<span style="font-size:16px">#%s</span> / %s %s' % (obj.id, obj.type.name, external)
analyse_type.short_description = _("Analyse")
analyse_type.allow_tags = True
def save_model(self, request, obj, form, change):
# is_new = not bool(obj.id)
# if is_new:
obj.create_empty_values()
super().save_model(request, obj, form, change)
def save_related(self, request, form, formset, change):
super().save_related(request, form, formset, change)
form.instance.save_result()
# def get_queryset(self, request):
# return super().get_queryset(request).exclude(group_relation='GRP')
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
if db_field.name == "template":
kwargs["queryset"] = request._obj_.type.reporttemplate_set.all()
return super().formfield_for_foreignkey(db_field, request, **kwargs)
def get_form(self, request, obj=None, **kwargs):
# just save obj reference for future processing in Inline
request._obj_ = obj
return super().get_form(request, obj, **kwargs)
# def changelist_view(self, request, extra_context=None):
# if not request.META['QUERY_STRING'] and \
# not request.META.get('HTTP_REFERER', '').startswith(request.build_absolute_uri()):
# return HttpResponseRedirect(request.path + "?finished__exact=0")
# return super().changelist_view(request, extra_context=extra_context)
class Media:
js = [
'/static/tinymce/tinymce.min.js',
'/static/tinymce/setup.js',
]
@admin.register(ReportTemplate)
class ReportTemplateAdmin(admin.ModelAdmin):
filter_horizontal = ('analyse_type',)
save_as = True
class Media:
js = [
'/static/tinymce/tinymce.min.js',
'/static/tinymce/setup.js',
]
@admin.register(Doctor)
class DoctorAdmin(admin.ModelAdmin):
search_fields = ('name', 'surname')
raw_id_fields = ('institution',)
autocomplete_lookup_fields = {
'fk': ['institution', ],
}
def save_model(self, request, obj, form, change):
if not obj.institution:
            # create a clinic record for doctors who don't
            # belong to an institution
ins = Institution(name="%s %s" % (obj.name, obj.surname), type=30)
ins.save()
obj.institution = ins
obj.save()
class InstitutePricingInline(admin.TabularInline):
model = InstitutePricing
classes = ('grp-collapse',)
class AnalysePricingInline(admin.TabularInline):
model = AnalysePricing
classes = ('grp-collapse',)
fields = ('analyse_type', 'price', 'discount_rate')
@admin.register(Institution)
class InstitutionAdmin(admin.ModelAdmin):
search_fields = ('name', 'id', 'code')
inlines = [InstitutePricingInline, AnalysePricingInline]
class AnalyseInline(admin.TabularInline):
model = Analyse
extra = 1
classes = ('grp-collapse',)
# autocomplete_lookup_fields = {
# 'type_fk': ['type'],
# }
# show_change_link = True
raw_id_fields = ("type",)
readonly_fields = ('get_state', 'finished', 'ext_lab')
fields = ('get_state', 'type', 'sample_type', 'sample_amount', 'sample_unit', 'medium_amount', 'medium_type',
'ext_lab')
# list_filter = ('category__name',)
autocomplete_lookup_fields = {
'fk': ['type'],
}
def get_state(self, obj):
states = obj.state_set.filter(current_state=True)
if len(states) == 1:
return states[0].definition.name
else:
return '<br/>'.join('%s - %s' % (st.group, st.definition.name) for st in states)
get_state.short_description = _('Analyse state')
get_state.allow_tags = True
def ext_lab(self, obj):
return obj.external_lab if obj.external else ''
ext_lab.short_description = _('Ext.Lab')
def get_extra(self, request, obj=None, **kwargs):
return 0 if obj else self.extra
def get_queryset(self, request):
return super().get_queryset(request).exclude(group_relation='GRP')
class AdmissionStateInline(admin.TabularInline):
model = AdmissionState
extra = 1
classes = ('grp-collapse',)
post_admission_save = django.dispatch.Signal(providing_args=["instance", ])
@admin.register(Admission)
class AdminAdmission(admin.ModelAdmin):
date_hierarchy = 'timestamp'
search_fields = ('patient__name', 'patient__surname')
list_display = ('id', 'patient', 'institution', 'analyse_state', 'timestamp')
list_display_links = ('id', 'patient')
readonly_fields = ('id', ) #'timestamp'
raw_id_fields = ('patient', 'institution', 'doctor')
fields = (('id', 'timestamp'), ('patient', 'is_urgent'), ('doctor', 'institution'),
('week', 'upd_week', 'lmp_date'),
('indications', 'history'),
)
autocomplete_lookup_fields = {
'fk': ['patient', 'institution', 'doctor'],
}
inlines = [AnalyseInline, AdmissionStateInline] # AdmissionSampleInline,
def get_form(self, request, obj=None, **kwargs):
# just save obj reference for future processing in Inline
request._obj_ = obj
return super().get_form(request, obj, **kwargs)
def save_model(self, request, obj, form, change):
obj.save()
# if obj.parameter_definition.strip():
# obj.create_update_parameter_keys()
def _create_payment_item(self):
pass
def get_search_results(self, request, queryset, search_term):
# integer search_term means we want to list values of a certain admission
try:
search_term_as_int = int(search_term)
if len(search_term) < 6:
queryset = queryset.filter(pk=search_term_as_int)
else:
queryset = queryset.filter(patient__tcno__contains=search_term_as_int)
except ValueError:
queryset = queryset.filter(Q(patient__name__icontains=search_term)|
Q(patient__surname__icontains=search_term))
return queryset, False
def _save_analyses(self, admission, analyses):
for analyse in analyses:
if not analyse.type:
continue
is_new = not analyse.id
analyse.save()
if analyse.type.group_type:
analyse.group_relation = 'GRP' # this is a group
rand_group_code = uuid4().hex
for sub_analyse_type in analyse.type.subtypes.all():
anl = Analyse(type=sub_analyse_type,
sample_type=analyse.sample_type,
grouper=analyse,
group_relation=analyse.id,
external=sub_analyse_type.external,
external_lab=sub_analyse_type.external_lab,
admission=admission)
anl.save()
anl._set_state_for(self._request.user, first=True)
if analyse.type.external:
analyse.external = analyse.type.external
analyse.external_lab = analyse.type.external_lab
analyse.save()
if is_new:
analyse._set_state_for(self._request.user, first=True)
post_admission_save.send(sender=Admission, instance=admission)
def save_related(self, request, form, formsets, change):
"""
- expand group-type analyses
- create payment and payment-items
"""
form.save_m2m()
self._request = request
if not change:
adm = form.instance
# payment = Payment(admission=adm, patient=adm.patient)
# if adm.institution.preferred_payment_method == 20:
# payment.institution = adm.institution
# else:
# payment.patient = adm.patient
for formset in formsets:
if formset.model == Analyse:
self._save_analyses(formset.instance, formset.save(commit=False))
formset.save()
customer_charge, new = AdmissionPricing.objects.get_or_create(admission=form.instance)
customer_charge.process_payments()
class MethodAdminForm(forms.ModelForm):
analysetype_set = forms.ModelMultipleChoiceField(
queryset=AnalyseType.objects.all(),
required=False,
widget=FilteredSelectMultiple(
verbose_name=_('Analyse Types'),
is_stacked=False
)
)
class Meta:
model = AnalyseType
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance and self.instance.pk:
self.fields['analysetype_set'].initial = self.instance.analysetype_set.all()
def save(self, *args, **kwargs):
kwargs['commit'] = True
return super().save(*args, **kwargs)
def save_m2m(self):
self.instance.analysetype_set.clear()
self.instance.analysetype_set.add(*self.cleaned_data['analysetype_set'])
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'code',)
form = MethodAdminForm
filter_horizontal = ('states',)
@admin.register(Method)
class AdminMethod(admin.ModelAdmin):
list_display = ('name', 'code',)
form = MethodAdminForm
@admin.register(MediumType)
class AdminMedium(admin.ModelAdmin):
list_display = ('name', 'code', 'order')
list_editable = ('code', 'order',)
@receiver(post_admission_save, sender=Admission)
def create_payment_objects(sender, instance, **kwargs):
# instance.analyse_set.filter(group_relation='GRP').delete()
for analyse in instance.analyse_set.exclude(group_relation='GRP'):
analyse.create_empty_values()
# instance.analyse_set.filter(group_relation='GRP').delete()
@admin.register(Setting)
class AdminSetting(admin.ModelAdmin):
search_fields = ('name',)
list_display = ('name', 'value', 'key')
list_editable = ('value', )
readonly_fields = ('name', 'key')
app_models = apps.get_app_config('lab').get_models()
for model in app_models:
try:
admin.site.register(model)
except AlreadyRegistered:
pass
|
gpl-3.0
| 6,820,900,275,454,348,000
| 33.624187
| 114
| 0.606512
| false
| 3.92656
| false
| false
| false
|
stevecshanks/trello-next-actions
|
nextactions/card.py
|
1
|
1191
|
from urllib.parse import urlparse
class Card:
AUTO_GENERATED_TEXT = 'Auto-created by TrelloNextActions'
def __init__(self, trello, json):
self._trello = trello
self.id = json['id']
self.name = json['name']
self.board_id = json['idBoard']
self.description = json['desc']
self.url = json['url']
def isAutoGenerated(self):
return Card.AUTO_GENERATED_TEXT in self.description
def getProjectBoard(self):
board_id = self._getProjectBoardId()
return self._trello.getBoardById(board_id)
def _getProjectBoardId(self):
url_components = urlparse(self.description)
path_segments = url_components.path.split('/')
if (len(path_segments) >= 3):
return path_segments[2]
else:
raise ValueError("Description could not be parsed as project URL")
def __eq__(self, other):
return self.id == other.id
def linksTo(self, other):
return self.description.startswith(other.url)
def archive(self):
self._trello.put(
'https://api.trello.com/1/cards/' + self.id + '/closed',
{'value': "true"}
)
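# Illustrative sketch of how _getProjectBoardId reads a board id out of a card
# whose description starts with a board URL; the JSON payload below is invented.
def _demo_project_board_id():
    card = Card(trello=None, json={
        'id': '1', 'name': 'demo', 'idBoard': 'b1',
        'desc': 'https://trello.com/b/abc123/my-project',
        'url': 'https://trello.com/c/xyz/demo',
    })
    # urlparse path is '/b/abc123/my-project' -> segments ['', 'b', 'abc123', ...]
    return card._getProjectBoardId()  # 'abc123'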
|
mit
| 3,288,807,941,837,223,400
| 28.04878
| 78
| 0.596977
| false
| 3.733542
| false
| false
| false
|
petrjasek/superdesk-core
|
superdesk/sequences.py
|
1
|
2026
|
import superdesk
import traceback
from superdesk import get_resource_service
from .resource import Resource
from .services import BaseService
import logging
logger = logging.getLogger(__name__)
def init_app(app):
endpoint_name = "sequences"
service = SequencesService(endpoint_name, backend=superdesk.get_backend())
SequencesResource(endpoint_name, app=app, service=service)
class SequencesResource(Resource):
schema = {
"key": {"type": "string", "required": True, "nullable": False, "empty": False, "iunique": True},
"sequence_number": {
"type": "number",
"default": 1,
},
}
etag_ignore_fields = ["sequence_number", "name"]
internal_resource = True
mongo_indexes = {
"key_1": ([("key", 1)], {"unique": True}),
}
class SequencesService(BaseService):
def get_next_sequence_number(self, key_name, max_seq_number=None, min_seq_number=1):
"""
Generates Sequence Number
        :param key_name: key to identify the sequence
        :param max_seq_number: default None, maximum possible value; None means no upper limit
        :param min_seq_number: default 1, initial value; the sequence will start from the NEXT one
:returns: sequence number
"""
if not key_name:
logger.error("Empty sequence key is used: {}".format("\n".join(traceback.format_stack())))
raise KeyError("Sequence key cannot be empty")
target_resource = get_resource_service("sequences")
sequence_number = target_resource.find_and_modify(
query={"key": key_name}, update={"$inc": {"sequence_number": 1}}, upsert=True, new=True
).get("sequence_number")
if max_seq_number:
if sequence_number > max_seq_number:
target_resource.find_and_modify(
query={"key": key_name}, update={"$set": {"sequence_number": min_seq_number}}
)
sequence_number = min_seq_number
return sequence_number
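# Illustrative usage sketch (needs a configured superdesk app and database;
# the key name and limit below are made up):
#   seq = get_resource_service("sequences").get_next_sequence_number(
#       key_name="ingest_update", max_seq_number=9999)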
|
agpl-3.0
| -385,703,106,562,796,540
| 32.766667
| 104
| 0.616486
| false
| 4.043912
| false
| false
| false
|
jiaxiaolei/pycate
|
pycate/model/refresh_model.py
|
1
|
1488
|
# -*- coding:utf-8 -*-
# scheduled refresh
# import tornpg
import libs
import peewee
from core.base_model import BaseModel
class phpmps_refresh(BaseModel):
    uid = peewee.CharField(max_length=36, null=False, unique=True, help_text='category ID', primary_key=True)
refresh_time = peewee.IntegerField(null=False)
info_uid = peewee.CharField(max_length=36, null=False)
time_str = peewee.CharField(max_length=36)
class MRefresh(BaseModel):
def __init__(self):
try:
phpmps_refresh.create_table()
pass
except:
pass
def getall(self):
return (phpmps_refresh.select().order_by('cityid'))
def get_by_id(self, info_id):
'''
        Select by the given info ID.
'''
return (phpmps_refresh.select().where(phpmps_refresh.info_uid == info_id))
def del_by_id(self, uid):
try:
entry = phpmps_refresh.delete().where(phpmps_refresh.uid == uid)
entry.execute()
return True
except:
return False
def insert_data(self, par_arr):
uid = libs.tool.get_uid()
# libs.tool.mark_it()
try:
entry = phpmps_refresh.create(
uid=uid,
info_uid=par_arr[0],
refresh_time=par_arr[1],
time_str=par_arr[2],
)
return True
except:
return False
|
mit
| 27,952,428,316,652,670
| 24.472727
| 103
| 0.531593
| false
| 3.491607
| false
| false
| false
|
bd-j/sedpy
|
sedpy/photometer.py
|
1
|
10083
|
# Simple Aperture photometry. kind of a stupid class dependence.
# Ideally a photometer object should take an image and a region object
# as arguments, where the region object is an instance of a particular aperture class and
# can return an in_region boolean (or perhaps a 'fraction') for
# any position(s). As it is now the 'photometer' object (called Aperture)
# is actually subclassed by the more detailed apertures/regions,
# and requires passing a shape dictionary as well, which is redundant.
import sys
import numpy as np
from numpy import hypot, sqrt
#the below are for some of the more arcane sky measurement methods
try:
from scipy.optimize import curve_fit
import sklearn
from astroML.density_estimation import bayesian_blocks
import matplotlib.pyplot as pl
except ImportError:
pass
thismod = sys.modules[__name__]
class Photometer(object):
"""
Trying for a better class dependence. Basically wraps the image
in an object with photometry methods. Incomplete
"""
def __init__(self, image, wcs=None, ivar=None):
self.nx, self.ny = image.shape
self.image = image.flatten()
self.wcs = wcs
self.ivar = ivar
        yy, xx = np.indices((self.nx, self.ny))
if wcs is not None:
self._x, self._y = wcs.wcs_pix2world(xx, yy, 0)
else:
self._x, self._y = xx, yy
def measure_flux(self, aperture, background):
"""
Measure background subtracted flux. Takes an aperture object,
and a local background object.
"""
o, a, e = self.object_flux(aperture)
b, ba, be = background.evaluate(self)
flux = o - a*b
flux_var = e*e + a*a*be*be/ba
return flux, sqrt(flux_var)
def object_flux(self, aperture, weights=1.0):
"""
Measure total flux (source + background) within an aperture.
Takes an image, and an aperture object.
"""
fracs = aperture.contains(self._x, self._y)
inds = fracs > 0
if self.ivar is not None:
unc = sqrt((fracs[inds]/self.ivar[inds]).sum())
else:
unc = np.nan
return (self.image * weights * fracs).sum(), fracs.sum(), unc
class Aperture(object):
def world_to_pixels(shape, wcs):
pass
def object_flux(self, shape, image, ivar = None):
"""Measure total flux within an aperture (source + background)"""
inds, fracs = self.pixnum(**shape)
unc = 0
if ivar is not None:
unc = sqrt((fracs/ivar[inds[0], inds[1]]).sum())
return (image[inds[0], inds[1]]*fracs).sum(), fracs.sum(), unc
def measure_flux(self, shape, image, wcs = None, skypars = None, ivar = None):
"""Measure background subtracted flux."""
o, a, e = self.object_flux(shape, image, ivar = ivar)
b, ba, be = self.background.evaluate(image, skypars)
flux = o - a*b
flux_var = e*e + a*a*be*be/ba
return flux, sqrt(flux_var)
#def get_flux(self, image, ivar = None):
# return self.measure_flux(self.shape, image, ivar = ivar, skypars = self.skypars, wcs = self.wcs)
class Circular(Aperture):
def __init__(self, exact = False):
if exact is True:
self.pixnum = circle_frac_exact
else:
self.pixnum = circle_frac_quick
self.background = ZeroSky()
class Elliptical(Aperture):
def __init__(self):
        self.pixnum = ellipse_frac_quick
self.background = ZeroSky()
class Box(Aperture):
def __init__(self):
        self.pixnum = box_frac_quick
self.background = ZeroSky()
#
######## Classes for sky measurement ######
class Background(object):
def evaluate(self, image, skypars):
inds, fracs = self.pixnum(**skypars)
value, sdpp = self.skystats(image[inds[0], inds[1]], **skypars)
return value, len(inds), sdpp
class Annulus(Background):
def __init__(self, bgtype = 'quartile_sky'):
self.pixnum = circle_frac_quick
self.skystats = getattr(thismod, bgtype)
class EllipticalAnnulus(Background):
def __init__(self, bgtype = 'quartile_sky'):
self.pixnum = ellipse_frac_quick
self.skystats = getattr(thismod,bgtype)
class ZeroSky(Background):
"""A class for sky values of zero, or for user defined sky statistics.
The return_value is a tuple giving (sky, sky_area, sigma_sky_per_pixel)"""
def __init__(self, bgtype = 'quartile_sky', return_value = (0,1,0)):
self.pixnum = None
self.skystats = None
self.return_value = return_value
def evaluate(self,image, skypars):
return self.return_value
### Pixnum methods ####
def circle_frac_quick(xcen = 0, ycen = 0, radius = 1, inner_radius = None, subpixels = 1, **extras):
"""obtain fractional pixel coverage. optionally use subpixels to
increase precison (though this doesn't seem to help). Assumes pixel
centers have coordinates X.5, Y.5 """
#setup
center = np.array([xcen,ycen])
sz = np.ceil((radius+1)*2)
start = np.floor(center +0.5-radius)
center = center*subpixels
radius = radius*subpixels
sz = sz*subpixels
start = (start-1)*subpixels
if (start < 0).any():
raise ValueError('Aperture extends off image edge')
off = center - start - 0.5
yy, xx = np.ogrid[ 0:sz, 0:sz ]
rr = hypot(xx - off[0], yy-off[1])
#find pixels within the radius
within = (radius+0.5) - rr
within[within > 1.0] = 1.0
within[within < 0] = 0.
#if it's an annulus
if inner_radius is not None:
within_inner = inner_radius*subpixels + 0.5 - rr
within_inner[within_inner < 0.0] = 0.0
within_inner[within_inner > 1.0] = 1.0
within = within - within_inner
an = within
#rebin if you used subpixels
if subpixels != 1:
        an = an.reshape((an.shape[0]//subpixels, subpixels, an.shape[1]//subpixels, subpixels)).mean(1).mean(2)
    #pick the pixels to return, and get their fractional coverage
pix1 = np.where(an > 0.0)
fracs = an[pix1[0],pix1[1]]
x = (pix1[0] + start[0]/subpixels).astype('i8')
y = (pix1[1] + start[1]/subpixels).astype('i8')
return (x, y), fracs
def circle_frac_exact(xcen, ycen, radius):
pass
def ellipse_frac_quick(xcen = 0, ycen = 0, a = 1, b = 1, pa = 0, precision = None, subpixels = 1, **extras):
    #setup, mirroring circle_frac_quick with the semi-major axis as the bounding radius
    center = np.array([xcen, ycen])
    rmax = max(a, b)
    sz = np.ceil((rmax + 1) * 2)
    start = np.floor(center + 0.5 - rmax)
    center = center * subpixels
    a, b = a * subpixels, b * subpixels
    sz = sz * subpixels
    start = (start - 1) * subpixels
    if (start < 0).any():
        raise ValueError('Aperture extends off image edge')
    off = center - start - 0.5
    yy, xx = np.ogrid[ 0:sz, 0:sz ]
    dx, dy = (xx - off[0]), (yy - off[1])
    within = 1 - np.sqrt(((dx * np.cos(pa) - dy * np.sin(pa))/a)**2 + ((dx * np.sin(pa) + dy * np.cos(pa))/b)**2)
    within[within > 1.0] = 1.0
    within[within < 0] = 0.
    an = within
    #rebin if you used subpixels
    if subpixels != 1:
        an = an.reshape((an.shape[0]//subpixels, subpixels, an.shape[1]//subpixels, subpixels)).mean(1).mean(2)
    #pick the pixels to return, and get their fractional coverage
    pix1 = np.where(an > 0.0)
    fracs = an[pix1[0],pix1[1]]
    x = (pix1[0] + start[0]/subpixels).astype('i8')
    y = (pix1[1] + start[1]/subpixels).astype('i8')
    return (x, y), fracs
#####SKY statistics determination methods #####
def quartile_sky(values, percentiles = [0.16, 0.5, 0.84], **extras):
"""Use the median and 16th percentile to estimate the standard
deviation per pixel."""
percentiles = np.asarray(percentiles)
npix = len(values)
#oo = np.argsort(values)
qval = np.sort(values)[np.round(npix*percentiles).astype('i8')]
#qval = values[oo[np.round(npix*percentiles)]]
return qval[1], qval[1]-qval[0]
def gaussfit_sky(values, p_thresh = 0.65, plot = False, **extras):
"""Fit a gaussian to the lower part of a histogram of the sky values.
The histogram bins are estimated using Bayesian blocks. p_thresh gives
the percentile below which the gaussian is fitted to the data. Return
central value and estimate of standard deviation per pixel """
bins = bayesian_blocks(values)
print(len(bins),bins)
#dbin = bins[1:]-bins[:-1]
cbin = (bins[1:]+bins[:-1])/2
hist = np.histogram(values, bins = bins, range = (bins.min(), bins.max()), density = True)
#pdf = hist/dbin
val_thresh = np.percentile(values, p_thresh)
    lower = cbin < val_thresh
def gauss(x, *p):
A, mu, sigma = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
# p0 is the initial guess for the fitting coefficients (A, mu and sigma above)
p0 = [np.max(hist[0]), values.mean(), values.std()]
coeff, var_matrix = curve_fit(gauss, cbin[lower], hist[0][lower], p0=p0)
if plot:
print(len(hist[1]), len(hist[0]),type(coeff))
pl.figure()
pl.plot(cbin,hist[0], color = 'b')
        pl.plot(cbin, gauss(cbin, *coeff), color = 'r')
pl.axvline(val_thresh)
return coeff[1], coeff[2]
def gmm_sky(values, **extras):
"""Use a gaussian mixture model, via expectation maximization.
of course, there's only one gaussian. could add another for
faint sources, bad pixels, but..."""
gmm = sklearn.mixture.GMM()
r = gmm.fit(values)
return r.means_[0, 0], np.sqrt(r.covars_[0, 0])
def sigclip_sky(values, sigma = [3, 2.25], minlength = 5, **extras):
"""Use iterative sigma clipping"""
def between(vals, sigs):
m, s = vals.mean(), vals.std()
        return (vals < m+sigs[1]*s) & (vals > m-sigs[0]*s)
while ( (False in between(values, sigma)) & (len(values) > minlength) ):
values = values[between(values,sigma)]
return values.mean(), values.std()
##### Centroiding #######
def centroid(images):
"""Dumb dumb centroiding. assumes x and y axes are the
last two dimensions of images. Something is wrong with the
broadcasting. absolutely *have* to include weighting"""
sz = images.shape[-2:]
xg = np.arange(sz[0])
yg = np.arange(sz[1])
denom = images.sum(axis = (-1, -2))
y = (yg[None,None,:]*images).sum(axis = (-2, -1)) / denom
x = (xg[None,:,None]*images).sum(axis = (-2, -1)) / denom
return x, y
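# Illustrative usage sketch: sum flux in a small circular aperture of a
# synthetic flat image; the image size, center and radius are invented.
def _demo_circular_aperture():
    image = np.ones((32, 32))
    (x, y), fracs = circle_frac_quick(xcen=16.0, ycen=16.0, radius=3.0)
    # roughly the aperture area, pi * 3**2 ~ 28
    return (image[x, y] * fracs).sum()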
|
gpl-2.0
| 1,693,906,440,199,580,000
| 33.064189
| 113
| 0.60726
| false
| 3.213193
| false
| false
| false
|
MCME/WebStatus
|
mcmeAPI/db/models.py
|
1
|
1641
|
from sqlalchemy import Column, String, Integer, DateTime, PickleType, Boolean
from mcmeAPI.db import Base
class User(Base):
__tablename__ = 'users'
name = Column(String(20), primary_key=True)
group = Column(String(15)) #rank
updated = Column(DateTime())
ob = Column(Boolean)
staff = Column(Boolean)
permissions = Column(PickleType)
worlds = Column(PickleType)
@property
def serialize(self):
return {'group':self.group,
'name':self.name,
'ob':self.ob,
'staff':self.staff,
'permissions':self.permissions,
'worlds':self.worlds,
'updated':dump_datetime(self.updated)
}
def __repr__(self):
return '<User %r>' % (self.name)
class Server(Base):
__tablename__ = 'servers'
name = Column(String(15), primary_key=True)
status = Column(String(10))
players = Column(PickleType)
maxplayers = Column(Integer)
num_players = Column(Integer)
plugins = Column(PickleType)
updated = Column(DateTime())
@property
def serialize(self):
return ({'name': self.name,
'status': self.status,
'players':self.players,
'num_players':self.num_players,
'maxplayers':self.maxplayers,
'plugins':self.plugins,
'updated':dump_datetime(self.updated)})
def dump_datetime(value):
"""Deserialize datetime object into string form for JSON processing."""
if value is None:
return None
return value.strftime("%Y-%m-%dT%H:%M:%S")
|
gpl-3.0
| -4,360,253,035,946,446,300
| 29.407407
| 77
| 0.575868
| false
| 4.07196
| false
| false
| false
|
ubuntunux/JumpJump
|
PyInterpreter/Tutorial.py
|
1
|
6321
|
import Utility as Util
from Utility import *
from Constants import *
from collections import OrderedDict
import browser
#---------------------#
# CLASS : Tutorial layout class
#---------------------#
class TutorialLayout:
def __init__(self, ui):
self.ui = ui
self.tutorialMap = OrderedDict({})
layout_height = 0
self.screen = Screen(name=szTutorial)
# screen menu layout
self.screenMenuLayout = BoxLayout(orientation="horizontal", size_hint=(1, None), height="35dp")
btn_console = Button(text="Console", background_color=[1.5,0.8,0.8,2])
btn_editor = Button(text="Code Editor", background_color=[0.8,1.5,0.8,2])
btn_tutorial = Button(text="Python Tutorial", background_color=[0.8,0.8,1.5,2])
btn_console.bind(on_release=lambda inst:self.ui.setMode(szConsole))
btn_editor.bind(on_release=lambda inst:self.ui.setMode(szEditor))
self.screenMenuLayout.add_widget(btn_console)
self.screenMenuLayout.add_widget(btn_editor)
self.screenMenuLayout.add_widget(btn_tutorial)
self.screen.add_widget(self.screenMenuLayout)
self.tutorialSV = ScrollView(size_hint=(1, None), size=(W,H - self.screenMenuLayout.size[1]), pos=(0, self.screenMenuLayout.top))
with self.tutorialSV.canvas.before:
Color(0.1, 0.1, 0.2, 1)
Rectangle(size=WH)
self.tutorialLayout = BoxLayout(orientation="vertical", size_hint_y = None)
self.tutorialSV.add_widget(self.tutorialLayout)
self.screen.add_widget(self.tutorialSV)
# add title header
image = Image(source=pythonLogo, allow_stretch=True, keep_ratio=True, size_hint_x=None)
label_Title = Label(text = "Python Tutorials", font_size="30dp", bold=True, color=[1.0,0.7,0.4,1])
titleLayout = BoxLayout(orientation="horizontal", padding=[metrics.dp(20),0,0,0], size_hint=(1, 50.0/30.0))
titleLayout.add_widget(image)
titleLayout.add_widget(label_Title)
self.tutorialLayout.add_widget(titleLayout)
layout_height += metrics.dp(50)
# add python tutorial url
url = "https://docs.python.org/2.7/tutorial"
btn_url = Button(text="Excepts from {}".format(url), font_size="13dp")
btn_url.bind(on_release=lambda inst:browser.open_url(url))
self.tutorialLayout.add_widget(btn_url)
layout_height += metrics.dp(30)
# add my comment
self.tutorialLayout.add_widget(Label(text="I will update more tutorial.", font_size="15dp", color=[1.0,0.85,0.7,1]))
self.tutorialLayout.add_widget(Label(text=" ", font_size="12dp"))
layout_height += metrics.dp(50)
# create tutorial buttons
fileList = {}
for dirpath, dirnames, filenames in os.walk(tutorialDir):
filenames.sort()
fileList[dirpath] = filenames
keys = fileList.keys()
keys.sort()
for dirpath in fileList:
# add category label
if fileList[dirpath]:
label_category = Label(text = os.path.split(dirpath)[-1], font_size="18dp", halign="left", bold=True, color=[1.0,0.85,0.7,1], size_hint_y=40.0/30.0)
self.tutorialLayout.add_widget(label_category)
layout_height += metrics.dp(40)
# add tutorials
for filename in fileList[dirpath]:
# load tutorial file
f = open(os.path.join(dirpath, filename), "r")
lines = list(f)
f.close()
desc = "".join(lines)
# add a button
btn = Button(text=desc[:desc.find("\n")], font_size="15dp", size_hint_y=1, background_color=[0.8, 0.8, 1.5, 1])
btn.bind(on_release = self.chooseTutorial)
self.tutorialMap[btn] = desc
self.tutorialLayout.add_widget(btn)
layout_height += metrics.dp(30)
# refresh height
self.tutorialLayout.height = layout_height
def chooseTutorial(self, btn):
if btn in self.tutorialMap:
self.ui.clearOutput()
desc = self.tutorialMap[btn]
self.ui.displayText("\n-------------------------", 1)
# split desc by line
lines = desc.split("\n")
# show title
if lines:
self.ui.displayText("Tutorial : " + lines.pop(0), 1)
# show tutorial body
textList = []
isInCode = False
for line in lines:
if line.startswith("[code]"):
self.ui.displayText("\n".join(textList), 1)
textList = []
elif line.startswith("[/code]"):
self.ui.displayText("\n".join(textList), 1, background_color=(0.5, 0.5, 1, 0.35))
textList = []
else:
textList.append(line)
else:
if isInCode:
self.ui.displayText("\n".join(textList), 1, background_color=(0.5, 0.5, 1, 0.35))
else:
self.ui.displayText("\n".join(textList), 1)
# end of tutorial body
self.ui.displayText("------------------------\n\nLet's try this!!\n", 1)
# next, prev tutorial buttons
padding = kivy.metrics.dp(20)
fontSize = kivy.metrics.dp(14)
spacing = kivy.metrics.dp(20)
layout = BoxLayout(size_hint=(1,None), height="70dp", spacing=spacing, padding=[0, padding, 0, padding])
maxCharacter = int(math.ceil((W-spacing) / fontSize)) - 2
buttons = self.tutorialMap.keys()
curIndex = buttons.index(btn)
btnColor = [0.8, 0.8, 1.5, 1]
btn_prev = Button(text="----", font_size=fontSize, background_color=btnColor)
btn_next = Button(text="----", font_size=fontSize, background_color=btnColor)
if curIndex > 0:
btn_prevTutorial = buttons[curIndex - 1]
if len(btn_prevTutorial.text) >= maxCharacter:
btn_prev.text = btn_prevTutorial.text[:maxCharacter-3] + "..."
else:
btn_prev.text = btn_prevTutorial.text
btn_prev.bind(on_release = lambda inst:self.chooseTutorial(btn_prevTutorial))
if curIndex < len(buttons) - 1:
btn_nextTutorial = buttons[curIndex + 1]
if len(btn_nextTutorial.text) >= maxCharacter:
btn_next.text = btn_nextTutorial.text[:maxCharacter-3] + "..."
else:
btn_next.text = btn_nextTutorial.text
btn_next.bind(on_release = lambda inst:self.chooseTutorial(btn_nextTutorial))
layout.add_widget(btn_prev)
layout.add_widget(btn_next)
self.ui.outputLayout_add_widget(layout)
self.ui.setMode(szConsole)
def touchPrev(self):
self.ui.setMode(szConsole)
|
gpl-3.0
| -1,433,163,130,658,457,600
| 40.592105
| 156
| 0.626325
| false
| 3.333861
| false
| false
| false
|
kerwinxu/barcodeManager
|
zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Node/Alias.py
|
1
|
4400
|
"""scons.Node.Alias
Alias nodes.
This creates a hash of global Aliases (dummy targets).
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Alias.py 5023 2010/06/14 22:05:46 scons"
import collections
import SCons.Errors
import SCons.Node
import SCons.Util
class AliasNameSpace(collections.UserDict):
def Alias(self, name, **kw):
if isinstance(name, SCons.Node.Alias.Alias):
return name
try:
a = self[name]
except KeyError:
a = SCons.Node.Alias.Alias(name, **kw)
self[name] = a
return a
def lookup(self, name, **kw):
try:
return self[name]
except KeyError:
return None
class AliasNodeInfo(SCons.Node.NodeInfoBase):
current_version_id = 1
field_list = ['csig']
def str_to_node(self, s):
return default_ans.Alias(s)
class AliasBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
class Alias(SCons.Node.Node):
NodeInfo = AliasNodeInfo
BuildInfo = AliasBuildInfo
def __init__(self, name):
SCons.Node.Node.__init__(self)
self.name = name
def str_for_display(self):
return '"' + self.__str__() + '"'
def __str__(self):
return self.name
def make_ready(self):
self.get_csig()
really_build = SCons.Node.Node.build
is_up_to_date = SCons.Node.Node.children_are_up_to_date
def is_under(self, dir):
# Make Alias nodes get built regardless of
# what directory scons was run from. Alias nodes
# are outside the filesystem:
return 1
def get_contents(self):
"""The contents of an alias is the concatenation
of the content signatures of all its sources."""
childsigs = [n.get_csig() for n in self.children()]
return ''.join(childsigs)
def sconsign(self):
"""An Alias is not recorded in .sconsign files"""
pass
#
#
#
def changed_since_last_build(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def build(self):
"""A "builder" for aliases."""
pass
def convert(self):
try: del self.builder
except AttributeError: pass
self.reset_executor()
self.build = self.really_build
def get_csig(self):
"""
Generate a node's content signature, the digested signature
of its content.
node - the node
cache - alternate node to use for the signature cache
returns - the content signature
"""
try:
return self.ninfo.csig
except AttributeError:
pass
contents = self.get_contents()
csig = SCons.Util.MD5signature(contents)
self.get_ninfo().csig = csig
return csig
default_ans = AliasNameSpace()
SCons.Node.arg2nodes_lookups.append(default_ans.lookup)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
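# Illustrative note, not part of the upstream SCons source: an Alias's content
# signature is the MD5 of its children's signatures concatenated, so e.g.
#   a = default_ans.Alias('build-all')
#   a.get_csig()    # MD5 over ''.join(n.get_csig() for n in a.children())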
|
bsd-2-clause
| 1,780,583,719,408,763,100
| 26.947368
| 95
| 0.623409
| false
| 3.99274
| false
| false
| false
|
zubinshah/algorithms
|
data-structures/python/sorting.py
|
1
|
11069
|
#!/usr/local/bin/python
"""
SORTING ALGORITHMS
 + Bubble Sort (iterative and recursive)
 + Selection Sort
 + Insertion Sort
 + Quick Sort
 + Merge Sort (in-place)
 + Heap Sort
This program sorts *num* randomly generated entries and logs the time taken,
together with comparison and swap counts, to compare the performance of the
various sorting algorithms.
"""
#******************************************************************************
import random
import sys
import time
debug = False
#******************************************************************************
def sel_sort(data):
c, s = 0, 0
num = len(data)
if debug is True:
print "Data Length : " + str(num)
for i in xrange(0, num):
curr_min = i
newMin = False
for j in xrange(i+1, num):
c += 1
if data[j] < data[curr_min]:
curr_min = j
newMin = True
if newMin is True:
s += 1
data[i] , data[curr_min] = data[curr_min], data[i]
return [c, s]
#******************************************************************************
#******************************************************************************
def bubble_sort(data):
c, s = 0, 0
num = len(data)
for i in range(0, num):
swap = False
if debug is True:
print "\niteration " + str(i)
for j in range(0, num-i-1):
c = c + 1
if data[j+1] < data[j]:
data[j], data[j+1] = data[j+1], data[j]
s = s + 1
swap = True
if swap is False:
break
return [c, s]
#******************************************************************************
#******************************************************************************
def recursive_bubble_sort(data, n):
if n == 1:
return
for i in range(0, n-1):
if data[i] > data[i+1]:
data[i], data[i+1] = data[i+1], data[i]
recursive_bubble_sort(data, n-1)
#******************************************************************************
def quick_sort_partition (data, lo, hi, c):
pivot = hi
partition = lo
for i in xrange(lo, hi):
c[0] += 1
if data[i] < data[pivot]:
c[1] += 1
data[i], data[partition] = data[partition], data[i]
partition = partition + 1
c[1] += 1
data[partition], data[pivot] = data[pivot], data[partition]
return partition
def quick_sort (data, lo, hi, c=None):
    # use None instead of a mutable default so the counters reset on every
    # fresh call (a shared default list would accumulate across calls)
    if c is None:
        c = [0, 0]
    if lo < hi:
p = quick_sort_partition (data, lo, hi, c)
quick_sort(data, lo, p-1, c)
quick_sort(data, p+1, hi, c)
return c
#******************************************************************************
#******************************************************************************
## implement in-place merge routine for merge_sort
def merge_inplace_merge(data, low, mid, high, c):
    # merge the two sorted runs data[low:mid] and data[mid:high+1] in place:
    # when the head of the right run is smaller, rotate it into position
    # instead of swapping (a plain swap would break the order of the right run)
    i = low
    j = mid
    while i < j and j <= high:
        c[0] += 1
        if data[i] <= data[j]:
            i = i + 1
        else:
            c[1] += 1
            value = data[j]
            data[i + 1:j + 1] = data[i:j]  # shift data[i..j-1] one slot right
            data[i] = value
            i = i + 1
            j = j + 1
#middle = mid
#for j in xrange(mid, high+1):
# for i in xrange(low, middle):
# if middle <= high:
# if data[i] > data[j]:
# print "swap", i, data[i], j, data[j], middle
# data[i], data[j] = data[j], data[i]
# middle = middle + 1
#return
## Implementing in-place merge sort. Space optimized.
def merge_sort (data, lo, hi, c=None):
    # avoid a shared mutable default argument; counters reset on every fresh call
    if c is None:
        c = [0, 0]
    if (lo < hi):
mid = (lo + hi + 1) / 2
merge_sort (data, lo, mid-1, c)
merge_sort (data, mid, hi, c)
merge_inplace_merge(data, lo, mid, hi, c)
return c
#******************************************************************************
#******************************************************************************
def insertion_sort (data):
c, s = 0, 0
for i in xrange(1, len(data)):
#insert elem data[i] at right location and shuffling
s += 1
item = data[i]
j = i
while j > 0:
#scan back from j by shifting
c+=1
if data[j-1] > item:
s+=1
data[j] = data[j-1]
j -= 1 #location not found, keep shuffling
else:
data[j] = item #copy item at right location
break
if j == 0:
data[j] = item
return [c, s]
#******************************************************************************
def heapsort_heapify(data, num, i, c):
largest = i
left = 2*i + 1
right = 2*i + 2
if left < num and data[left] > data[largest]:
largest = left
if right < num and data[right] > data[largest]:
largest = right
c[0] += 2 #two comparisions done above
    if i != largest:  # compare values, not object identity
data[i], data[largest] = data[largest], data[i]
c[1] += 1 #one comparision done above
heapsort_heapify(data, num, largest, c)
return c
def heapsort (data):
num = len(data)
c = [0, 0]
# build a MAXHEAP on the data set in place
# assuming the array is a binary heap, but needs heapification
# rightmost node in the second-last level
for i in xrange (num/2 - 1,-1, -1):
heapsort_heapify(data, num, i, c)
# make a max heap out of the array
# pick the max everytime and shift with the rightmost
for i in xrange(num-1, -1, -1):
data[i], data[0] = data[0], data[i]
c[1] += 1
heapsort_heapify(data, i, 0, c)
return c
#******************************************************************************
#******************************************************************************
def main (num):
###########################################################################
output = {}
# Key : 'name of sorting algorithm'
# Values : List of [time , [comparisions, swaps], description
print "SORTING " + str(num) + " ENTRIES\n"
###########################################################################
print "HEAP SORT"
data = [int(random.random()*100) for i in xrange(num)]
t1 = time.time()
c = heapsort(data)
t2 = time.time()
output['heapsort_unsorted'] = [t2-t1, c, "heapsort on unsorted data"]
##########
t1 = time.time()
c = heapsort(data)
t2 = time.time()
output['heapsort_sorted'] = [t2-t1, c, "heapsort on sorted data"]
##########
data.reverse()
t1 = time.time()
c = heapsort(data)
t2 = time.time()
output['heapsort_worstcase'] = [t2-t1, c, "heapsort on reverse sorted data"]
###########################################################################
print "BUBBLE SORT"
data = [int(random.random()*100) for i in xrange(num)]
t1 = time.time()
c = bubble_sort(data)
t2 = time.time()
output['bubble_unsorted'] = [t2-t1, c, "bubble sort on unsorted data"]
##########
t1 = time.time()
c = bubble_sort(data)
t2 = time.time()
output['bubble_sorted'] = [t2-t1, c, "bubble sort on sorted data"]
##########
data.reverse()
t1 = time.time()
c = bubble_sort(data)
t2 = time.time()
output['bubble_worstcase'] = [t2-t1, c, "bubble sort on reverse sorted data"]
###########################################################################
#print "RECURSIVE BUBBLE SORT"
#data = [int(random.random()*100) for i in xrange(num)]
#t = time.time()
#recursive_bubble_sort(data, len(data))
#print " Using recur bubble sort time taken .. " + str(time.time() - t)
#t = time.time()
#recursive_bubble_sort(data, len(data))
#print " Using recur bubble sort time taken (on sorted array).. " + str(time.time() - t)
#data.reverse()
#t = time.time()
#recursive_bubble_sort(data, len(data))
#print " Using recur bubble sort time taken (worst case reverse sorted array).. " + str(time.time() - t)
###########################################################################
print "SELECTION SORT"
data = [int(random.random()*100) for i in xrange(num)]
t1 = time.time()
c = sel_sort(data)
t2 = time.time()
output['selection_unsorted'] = [t2-t1, c, "selection sort on unsorted data"]
##########
t1 = time.time()
c = sel_sort(data)
t2 = time.time()
output['selection_sorted'] = [t2-t1, c, "selection sort on sorted data"]
##########
data.reverse()
t1 = time.time()
c = sel_sort(data)
t2 = time.time()
output['selection_worstcase'] = [t2-t1, c, "selection sort on reverse sorted data"]
###########################################################################
print "QUICK SORT"
data = [int(random.random()*100) for i in xrange(num)]
t1 = time.time()
c = quick_sort(data, 0, len(data) - 1)
t2 = time.time()
output['quick_unsorted'] = [t2-t1, c, "quick sort on unsorted data"]
t1 = time.time()
c = quick_sort(data, 0, len(data) - 1)
t2 = time.time()
    output['quick_sorted'] = [t2-t1, c, "quick sort on sorted data"]
data.reverse()
t1 = time.time()
c = quick_sort(data, 0, len(data) - 1)
t2 = time.time()
output['quick_worstcase'] = [t2-t1, c, "quick sort on reverse sorted data"]
###########################################################################
print "MERGE SORT"
data = [int(random.random()*100) for i in xrange(num)]
t1 = time.time()
    c = merge_sort(data, 0, len(data) - 1)
    t2 = time.time()
    output['mergesort_unsorted'] = [t2-t1, c, "mergesort on unsorted data"]
    t1 = time.time()
    c = merge_sort(data, 0, len(data) - 1)
    t2 = time.time()
    output['mergesort_sorted'] = [t2-t1, c, "mergesort on sorted data"]
    data.reverse()
    t1 = time.time()
    c = merge_sort(data, 0, len(data) - 1)
    t2 = time.time()
    output['mergesort_worstcase'] = [t2-t1, c, "mergesort on reverse sorted data"]
###########################################################################
print "INSERTION SORT"
data = [int(random.random()*100) for i in xrange(num)]
t1 = time.time()
c = insertion_sort(data)
t2 = time.time()
output['insertionsort_unsorted'] = [t2-t1, c, "insertion sort on unsorted data"]
t1 = time.time()
c = insertion_sort(data)
t2 = time.time()
output['insertionsort_sorted'] = [t2-t1, c, "insertion sort on sorted data"]
data.reverse()
t1 = time.time()
c = insertion_sort(data)
t2 = time.time()
output['insertionsort_worstcase'] = [t2-t1, c, "insertion sort on reverse sorted data"]
###########################################################################
#TBD : format output
print output
return
#******************************************************************************
#******************************************************************************
main(1000)
#******************************************************************************
|
apache-2.0
| 6,825,287,574,014,931,000
| 32.340361
| 109
| 0.437348
| false
| 3.690897
| false
| false
| false
|
hackfestca/cnb
|
cnb/modAvailable/CNBMMEncode.py
|
1
|
5840
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
CNB Matrix Module - encode
'''
import string
import urllib
import base64
from cnb.cnbMatrixModule import CNBMatrixModule
class CNBMMEncode(CNBMatrixModule):
"""
"""
name = 'encode'
usage = ''
desc = 'Encode a string using different algorithm'
aliases = []
isAdmin = False
def __init__(self,log):
CNBMatrixModule.__init__(self,log)
self._initOptParser()
def _initOptParser(self):
CNBMatrixModule._initOptParser(self,False)
encodeGrp = self.parser.add_argument_group('Encoding Options')
decodeGrp = self.parser.add_argument_group('Decoding Options')
encodeGrp.add_argument('--rot13', action='store_true', dest='rot13', default=False,\
help='Encode in rot13')
encodeGrp.add_argument('--rotn', action='store_true', dest='rotn', default=False,\
help='Encode in rotN (need to specify -n [0-26])')
encodeGrp.add_argument('--rotall', action='store_true', dest='rotall', default=False,\
help='Encode in all possible rotN (multiple output)')
encodeGrp.add_argument('--b64', action='store_true', dest='b64', default=False,\
help='Encode in base64')
encodeGrp.add_argument('--morse', action='store_true', dest='morse', default=False,\
help='Encode in morse')
encodeGrp.add_argument('--url', action='store_true', dest='url', default=False,\
help='Encode in URL')
decodeGrp.add_argument('--ub64', action='store_true', dest='ub64', default=False,\
help='Decode string from base64')
decodeGrp.add_argument('--umorse', action='store_true', dest='umorse', default=False,\
help='Decode string from morse')
decodeGrp.add_argument('--uurl', action='store_true', dest='uurl', default=False,\
help='Decode string from URL')
self.parser.add_argument("-h", "--help", action="store_true", dest='help', default=False,\
help='Display help')
self.parser.add_argument("-n", action="store", dest='n', type=int, default=0,\
help='Set a rotation iterator (for --rotn only)', nargs=1)
self.parser.add_argument('string', metavar='STRING', action='store', default='',\
help='Text to encode or Cipher to decode', nargs='*')
def _rotN(self, s, n):
lc = string.lowercase
trans = string.maketrans(lc, lc[n:] + lc[:n])
return string.translate(s, trans)
def __del__(self):
pass
def processCmd(self, oMsg):
result = 'Missing arguments, check help'
(args, err) = self.getParsedArgs(oMsg.args)
if args.string != '':
s = ' '.join(args.string)
else:
s = ''
if err != '':
result = err
elif args.rot13:
if s != '':
result = s.encode('rot13')
elif args.rotn:
if s != '' and args.n >= 0:
result = self._rotN(s,args.n)
elif args.rotall:
if s != '':
result = ''
for i in range(1,26):
result = result + self._rotN(s,i) + "\n"
elif args.b64:
if s != '':
result = base64.b64encode(s)
elif args.morse:
if s != '':
result = MorseEncoder().encode(s)
elif args.url:
if s != '':
result = urllib.quote(s)
elif args.ub64:
if s != '':
result = base64.b64decode(s)
elif args.umorse:
if s != '':
result = MorseEncoder().decode(s)
elif args.uurl:
if s != '':
result = urllib.unquote(s)
elif args.help:
result = self.getUsage()
else:
result = self.getUsage()
return result
class MorseEncoder():
morseAlphabet ={
"A" : ".-",
"B" : "-...",
"C" : "-.-.",
"D" : "-..",
"E" : ".",
"F" : "..-.",
"G" : "--.",
"H" : "....",
"I" : "..",
"J" : ".---",
"K" : "-.-",
"L" : ".-..",
"M" : "--",
"N" : "-.",
"O" : "---",
"P" : ".--.",
"Q" : "--.-",
"R" : ".-.",
"S" : "...",
"T" : "-",
"U" : "..-",
"V" : "...-",
"W" : ".--",
"X" : "-..-",
"Y" : "-.--",
"Z" : "--..",
" " : "/",
"." : "/"
}
def __init__(self):
self.inverseMorseAlphabet = dict((v,k) for (k,v) in self.morseAlphabet.items())
def decode(self, code, positionInString = 0):
"""
parse a morse code string positionInString is the starting point for decoding
"""
if positionInString < len(code):
morseLetter = ""
for key,char in enumerate(code[positionInString:]):
if char == " ":
positionInString = key + positionInString + 1
letter = self.inverseMorseAlphabet[morseLetter]
return letter + self.decode(code, positionInString)
else:
morseLetter += char
else:
return ""
def encode(self,message):
"""
encode a message in morse code, spaces between words are represented by '/'
"""
encodedMessage = ""
for char in message[:]:
if char.upper() in self.morseAlphabet:
encodedMessage += self.morseAlphabet[char.upper()] + " "
return encodedMessage
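# Illustrative usage sketch, not part of the original module; it assumes the
# cnb package is importable so that the imports at the top of this file resolve.
if __name__ == '__main__':
    enc = MorseEncoder()
    cipher = enc.encode("SOS")   # "... --- ... "
    print cipher
    print enc.decode(cipher)     # "SOS"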
|
gpl-3.0
| -2,542,247,868,136,886,000
| 31.087912
| 98
| 0.474315
| false
| 3.893333
| false
| false
| false
|
tseaver/gcloud-python
|
pubsub/google/cloud/pubsub_v1/gapic/publisher_client.py
|
1
|
40338
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.pubsub.v1 Publisher API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.path_template
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.pubsub_v1.gapic import publisher_client_config
from google.cloud.pubsub_v1.gapic.transports import publisher_grpc_transport
from google.cloud.pubsub_v1.proto import pubsub_pb2
from google.cloud.pubsub_v1.proto import pubsub_pb2_grpc
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-pubsub', ).version
class PublisherClient(object):
"""
The service that an application uses to manipulate topics, and to send
messages to a topic.
"""
SERVICE_ADDRESS = 'pubsub.googleapis.com:443'
"""The default address of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub', )
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.pubsub.v1.Publisher'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PublisherClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def topic_path(cls, project, topic):
"""Return a fully-qualified topic string."""
return google.api_core.path_template.expand(
'projects/{project}/topics/{topic}',
project=project,
topic=topic,
)
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project,
)
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=publisher_client_config.config,
client_info=None):
"""Constructor.
Args:
transport (Union[~.PublisherGrpcTransport,
Callable[[~.Credentials, type], ~.PublisherGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning)
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.', PendingDeprecationWarning)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=publisher_grpc_transport.
PublisherGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = publisher_grpc_transport.PublisherGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def create_topic(self,
name,
labels=None,
message_storage_policy=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates the given topic with the given name. See the
<a href=\"/pubsub/docs/admin#resource_names\"> resource name rules</a>.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> name = client.topic_path('[PROJECT]', '[TOPIC]')
>>>
>>> response = client.create_topic(name)
Args:
name (str): The name of the topic. It must have the format
``\"projects/{project}/topics/{topic}\"``. ``{topic}`` must start with a letter,
and contain only letters (``[A-Za-z]``), numbers (``[0-9]``), dashes (``-``),
underscores (``_``), periods (``.``), tildes (``~``), plus (``+``) or percent
signs (``%``). It must be between 3 and 255 characters in length, and it
must not start with ``\"goog\"``.
labels (dict[str -> str]): User labels.
message_storage_policy (Union[dict, ~google.cloud.pubsub_v1.types.MessageStoragePolicy]): Policy constraining how messages published to the topic may be stored. It
is determined when the topic is created based on the policy configured at
the project level. It must not be set by the caller in the request to
CreateTopic or to UpdateTopic. This field will be populated in the
responses for GetTopic, CreateTopic, and UpdateTopic: if not present in the
response, then no constraints are in effect.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.pubsub_v1.types.MessageStoragePolicy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.Topic` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_topic' not in self._inner_api_calls:
self._inner_api_calls[
'create_topic'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_topic,
default_retry=self._method_configs['CreateTopic'].retry,
default_timeout=self._method_configs['CreateTopic'].
timeout,
client_info=self._client_info,
)
request = pubsub_pb2.Topic(
name=name,
labels=labels,
message_storage_policy=message_storage_policy,
)
return self._inner_api_calls['create_topic'](
request, retry=retry, timeout=timeout, metadata=metadata)
def update_topic(self,
topic,
update_mask,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates an existing topic. Note that certain properties of a
topic are not modifiable.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> # TODO: Initialize ``topic``:
>>> topic = {}
>>>
>>> # TODO: Initialize ``update_mask``:
>>> update_mask = {}
>>>
>>> response = client.update_topic(topic, update_mask)
Args:
topic (Union[dict, ~google.cloud.pubsub_v1.types.Topic]): The updated topic object.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.pubsub_v1.types.Topic`
update_mask (Union[dict, ~google.cloud.pubsub_v1.types.FieldMask]): Indicates which fields in the provided topic to update. Must be specified
and non-empty. Note that if ``update_mask`` contains
\"message_storage_policy\" then the new value will be determined based on the
policy configured at the project or organization level. The
``message_storage_policy`` must not be set in the ``topic`` provided above.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.pubsub_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.Topic` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_topic' not in self._inner_api_calls:
self._inner_api_calls[
'update_topic'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_topic,
default_retry=self._method_configs['UpdateTopic'].retry,
default_timeout=self._method_configs['UpdateTopic'].
timeout,
client_info=self._client_info,
)
request = pubsub_pb2.UpdateTopicRequest(
topic=topic,
update_mask=update_mask,
)
return self._inner_api_calls['update_topic'](
request, retry=retry, timeout=timeout, metadata=metadata)
def publish(self,
topic,
messages,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Adds one or more messages to the topic. Returns ``NOT_FOUND`` if the topic
does not exist. The message payload must not be empty; it must contain
either a non-empty data field, or at least one attribute.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> topic = client.topic_path('[PROJECT]', '[TOPIC]')
>>> data = b''
>>> messages_element = {'data': data}
>>> messages = [messages_element]
>>>
>>> response = client.publish(topic, messages)
Args:
topic (str): The messages in the request will be published on this topic.
Format is ``projects/{project}/topics/{topic}``.
messages (list[Union[dict, ~google.cloud.pubsub_v1.types.PubsubMessage]]): The messages to publish.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.pubsub_v1.types.PubsubMessage`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.PublishResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'publish' not in self._inner_api_calls:
self._inner_api_calls[
'publish'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.publish,
default_retry=self._method_configs['Publish'].retry,
default_timeout=self._method_configs['Publish'].timeout,
client_info=self._client_info,
)
request = pubsub_pb2.PublishRequest(
topic=topic,
messages=messages,
)
return self._inner_api_calls['publish'](
request, retry=retry, timeout=timeout, metadata=metadata)
def get_topic(self,
topic,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets the configuration of a topic.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> topic = client.topic_path('[PROJECT]', '[TOPIC]')
>>>
>>> response = client.get_topic(topic)
Args:
topic (str): The name of the topic to get.
Format is ``projects/{project}/topics/{topic}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.Topic` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_topic' not in self._inner_api_calls:
self._inner_api_calls[
'get_topic'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_topic,
default_retry=self._method_configs['GetTopic'].retry,
default_timeout=self._method_configs['GetTopic'].timeout,
client_info=self._client_info,
)
request = pubsub_pb2.GetTopicRequest(topic=topic, )
return self._inner_api_calls['get_topic'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_topics(self,
project,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists matching topics.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> project = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_topics(project):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_topics(project, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
project (str): The name of the cloud project that topics belong to.
Format is ``projects/{project}``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.pubsub_v1.types.Topic` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_topics' not in self._inner_api_calls:
self._inner_api_calls[
'list_topics'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_topics,
default_retry=self._method_configs['ListTopics'].retry,
default_timeout=self._method_configs['ListTopics'].timeout,
client_info=self._client_info,
)
request = pubsub_pb2.ListTopicsRequest(
project=project,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_topics'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='topics',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def list_topic_subscriptions(
self,
topic,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists the names of the subscriptions on this topic.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> topic = client.topic_path('[PROJECT]', '[TOPIC]')
>>>
>>> # Iterate over all results
>>> for element in client.list_topic_subscriptions(topic):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_topic_subscriptions(topic, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
topic (str): The name of the topic that subscriptions are attached to.
Format is ``projects/{project}/topics/{topic}``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`str` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_topic_subscriptions' not in self._inner_api_calls:
self._inner_api_calls[
'list_topic_subscriptions'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_topic_subscriptions,
default_retry=self.
_method_configs['ListTopicSubscriptions'].retry,
default_timeout=self.
_method_configs['ListTopicSubscriptions'].timeout,
client_info=self._client_info,
)
request = pubsub_pb2.ListTopicSubscriptionsRequest(
topic=topic,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_topic_subscriptions'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='subscriptions',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def delete_topic(self,
topic,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes the topic with the given name. Returns ``NOT_FOUND`` if the topic
does not exist. After a topic is deleted, a new topic may be created with
the same name; this is an entirely new topic with none of the old
configuration or subscriptions. Existing subscriptions to this topic are
not deleted, but their ``topic`` field is set to ``_deleted-topic_``.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> topic = client.topic_path('[PROJECT]', '[TOPIC]')
>>>
>>> client.delete_topic(topic)
Args:
topic (str): Name of the topic to delete.
Format is ``projects/{project}/topics/{topic}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_topic' not in self._inner_api_calls:
self._inner_api_calls[
'delete_topic'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_topic,
default_retry=self._method_configs['DeleteTopic'].retry,
default_timeout=self._method_configs['DeleteTopic'].
timeout,
client_info=self._client_info,
)
request = pubsub_pb2.DeleteTopicRequest(topic=topic, )
self._inner_api_calls['delete_topic'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_iam_policy(self,
resource,
policy,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets the access control policy on the specified resource. Replaces any
existing policy.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> resource = client.topic_path('[PROJECT]', '[TOPIC]')
>>>
>>> # TODO: Initialize ``policy``:
>>> policy = {}
>>>
>>> response = client.set_iam_policy(resource, policy)
Args:
resource (str): REQUIRED: The resource for which the policy is being specified.
``resource`` is usually specified as a path. For example, a Project
resource is specified as ``projects/{project}``.
policy (Union[dict, ~google.cloud.pubsub_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The size of
the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.pubsub_v1.types.Policy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.Policy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_iam_policy' not in self._inner_api_calls:
self._inner_api_calls[
'set_iam_policy'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_iam_policy,
default_retry=self._method_configs['SetIamPolicy'].retry,
default_timeout=self._method_configs['SetIamPolicy'].
timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.SetIamPolicyRequest(
resource=resource,
policy=policy,
)
return self._inner_api_calls['set_iam_policy'](
request, retry=retry, timeout=timeout, metadata=metadata)
def get_iam_policy(self,
resource,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does not have a policy
set.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> resource = client.topic_path('[PROJECT]', '[TOPIC]')
>>>
>>> response = client.get_iam_policy(resource)
Args:
resource (str): REQUIRED: The resource for which the policy is being requested.
``resource`` is usually specified as a path. For example, a Project
resource is specified as ``projects/{project}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.Policy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_iam_policy' not in self._inner_api_calls:
self._inner_api_calls[
'get_iam_policy'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_iam_policy,
default_retry=self._method_configs['GetIamPolicy'].retry,
default_timeout=self._method_configs['GetIamPolicy'].
timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, )
return self._inner_api_calls['get_iam_policy'](
request, retry=retry, timeout=timeout, metadata=metadata)
def test_iam_permissions(self,
resource,
permissions,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns permissions that a caller has on the specified resource.
If the resource does not exist, this will return an empty set of
permissions, not a NOT_FOUND error.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.PublisherClient()
>>>
>>> resource = client.topic_path('[PROJECT]', '[TOPIC]')
>>>
>>> # TODO: Initialize ``permissions``:
>>> permissions = []
>>>
>>> response = client.test_iam_permissions(resource, permissions)
Args:
resource (str): REQUIRED: The resource for which the policy detail is being requested.
``resource`` is usually specified as a path. For example, a Project
resource is specified as ``projects/{project}``.
permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with
wildcards (such as '*' or 'storage.*') are not allowed. For more
information see
`IAM Overview <https://cloud.google.com/iam/docs/overview#permissions>`_.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.TestIamPermissionsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'test_iam_permissions' not in self._inner_api_calls:
self._inner_api_calls[
'test_iam_permissions'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.test_iam_permissions,
default_retry=self._method_configs['TestIamPermissions'].
retry,
default_timeout=self._method_configs['TestIamPermissions'].
timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource,
permissions=permissions,
)
return self._inner_api_calls['test_iam_permissions'](
request, retry=retry, timeout=timeout, metadata=metadata)
|
apache-2.0
| 2,759,004,533,117,325,300
| 44.838636
| 175
| 0.574768
| false
| 4.751796
| true
| false
| false
|
rdeits/cryptics
|
pycryptics/grammar/cfg.py
|
1
|
3899
|
import nltk.grammar as gram
import pycryptics.grammar.nodes as nd
from pycryptics.utils.indicators import INDICATORS
"""
A Context Free Grammar (CFG) to describe allowed substructures of cryptic crossword clues and how to solve each substructure.
"""
# The basic wordplay transforms
top = gram.Nonterminal(nd.TopNode)
lit = gram.Nonterminal(nd.LitNode)
d = gram.Nonterminal(nd.DNode)
syn = gram.Nonterminal(nd.SynNode)
first = gram.Nonterminal(nd.FirstNode)
null = gram.Nonterminal(nd.NullNode)
# Clue functions
ana = gram.Nonterminal(nd.AnaNode)
sub = gram.Nonterminal(nd.SubNode)
sub_init = gram.Nonterminal(nd.SubInitNode)
sub_final = gram.Nonterminal(nd.SubFinalNode)
ins = gram.Nonterminal(nd.InsNode)
rev = gram.Nonterminal(nd.RevNode)
# ana_, rev_, etc. are anagram/reversal/etc indicators,
# so they produce no text in the wordplay output
ana_ = gram.Nonterminal(nd.AnaIndNode)
sub_ = gram.Nonterminal(nd.SubIndNode)
sub_init_ = gram.Nonterminal(nd.SubInitIndNode)
sub_final_ = gram.Nonterminal(nd.SubFinalIndNode)
ins_ = gram.Nonterminal(nd.InsIndNode)
rev_ = gram.Nonterminal(nd.RevIndNode)
ind_nodes = [nd.AnaIndNode, nd.SubIndNode, nd.SubFinalIndNode, nd.SubInitIndNode, nd.InsIndNode, nd.RevIndNode]
# All the *_arg elements just exist to make the production rules more clear
# so they just pass their inputs literally
clue_arg = gram.Nonterminal(nd.ClueArgNode)
ins_arg = gram.Nonterminal(nd.InsArgNode)
ana_arg = gram.Nonterminal(nd.AnaArgNode)
sub_arg = gram.Nonterminal(nd.SubArgNode)
rev_arg = gram.Nonterminal(nd.RevArgNode)
production_rules = {
ins: [[ins_arg, ins_, ins_arg], [ins_arg, ins_arg, ins_]],
ana: [[ana_arg, ana_], [ana_, ana_arg]],
sub: [[sub_arg, sub_], [sub_, sub_arg]],
sub_init: [[sub_arg, sub_init_], [sub_init_, sub_arg]],
sub_final: [[sub_arg, sub_final_], [sub_final_, sub_arg]],
rev: [[rev_arg, rev_], [rev_, rev_arg]],
clue_arg: [[lit], [syn], [first], [null], [ana], [sub], [ins], [rev], [sub_init], [sub_final]],
ins_arg: [[lit], [ana], [syn], [sub], [sub_init], [sub_final], [first], [rev]],
ana_arg: [[lit]],
sub_arg: [[lit], [syn], [rev]],
rev_arg: [[lit], [syn]],
top: [[clue_arg, d],
[clue_arg, clue_arg, d],
[clue_arg, clue_arg, clue_arg, d],
[d, clue_arg],
[d, clue_arg, clue_arg],
[d, clue_arg, clue_arg, clue_arg],
]
}
additional_clue_rules = [[sub_init_] + [first] * i for i in range(3, 8)] + [[first] * i + [sub_init_] for i in range(3, 8)]
for r in additional_clue_rules:
production_rules[top].append(r + [d])
production_rules[top].append([d] + r)
base_prods = []
for n, rules in production_rules.items():
for r in rules:
base_prods.append(gram.Production(n, r))
known_functions = {'in': [ins_, lit, null, sub_],
'a': [lit, syn, null],
'is': [null, lit],
'for': [null, syn],
'large': [first, syn],
'primarily': [sub_init_],
'and': [null, lit],
'of': [null],
'on': [ins_, null, lit, syn],
'with': [null, ins_]}
def generate_grammar(phrases):
prods = []
for p in phrases:
if p in known_functions:
tags = known_functions[p]
else:
found = False
tags = [lit, d, syn, first]
for ind in ind_nodes:
if any(w == p or (len(w) > 5 and abs(len(w) - len(p)) <= 3 and p.startswith(w[:-3])) for w in INDICATORS[ind.name]):
tags.append(gram.Nonterminal(ind))
found = True
if not found:
tags = [lit, d, syn, first, ana_, sub_, sub_init_, sub_final_, rev_]
for t in tags:
prods.append(gram.Production(t, [p]))
return gram.ContextFreeGrammar(top, base_prods + prods)
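# Illustrative sketch, not part of the original module: generate_grammar takes
# the tokenized words of a clue and returns an nltk ContextFreeGrammar whose
# lexical productions are constrained by known_functions and INDICATORS, e.g.
#
#   g = generate_grammar(['primarily', 'large', 'in', 'a'])
#   print g.start()            # the `top` nonterminal
#   print len(g.productions())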
|
mit
| 3,835,686,781,258,495,000
| 37.22549
| 132
| 0.598872
| false
| 2.929376
| false
| false
| false
|
Yanivs24/AutoPA
|
experiments/python_scripts/avg_results_files.py
|
1
|
1420
|
#!/usr/bin/python
# This file is part of AutoPA - automatic extraction of pre-aspiration
# from speech segments in audio files.
#
# Copyright (c) 2016 Yaniv Sheena
import os
import re
import sys
import numpy as np
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError("expected 1 value - results path")
results_path = sys.argv[1]
current_path = os.getcwd()
results_full_path = current_path + '/' + results_path
file_data = ''
numbers_res = []
for file in os.listdir(results_path):
with open(os.path.join(results_full_path, file), 'r') as f:
file_data = f.read()
# get all results in the order they appeared
numbers_res.append(map(float, re.findall('\d+\.\d+', file_data)))
text_pattern = re.split('\d+\.\d+', file_data)
# average results
avg_results = np.zeros(len(numbers_res[0]))
for res in numbers_res:
avg_results = avg_results+np.array(res)
avg_results /= len(numbers_res)
print 'Read %s result files - the average is:' % len(numbers_res)
print avg_results
# write results to file with the same format
res_path = os.path.join(results_full_path, 'averaged_results.txt')
f = open(res_path, 'w')
for num, part in zip(avg_results, text_pattern):
f.write(part)
f.write(str(num))
f.close()
print 'Saved average results to: %s' % res_path
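# Illustrative invocation, not part of the original script: pass the directory
# (relative to the current working directory) that holds the per-run result
# files, e.g.
#   python avg_results_files.py results/experiment_1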
|
lgpl-3.0
| 410,955,419,383,036,100
| 24.818182
| 73
| 0.620423
| false
| 3.372922
| false
| false
| false
|
xuweiliang/Codelibrary
|
nova/policies/licence.py
|
1
|
1502
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-licence'
POLICY_ROOT = 'os_compute_api:os-licence:%s'
licence_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ADMIN_OR_OWNER),
]
#licence_policies = [
# policy.RuleDefault(
# name=POLICY_ROOT % 'discoverable',
# check_str=base.RULE_ANY),
# policy.RuleDefault(
# name=BASE_POLICY_NAME,
# check_str=base.RULE_ADMIN_API),
# policy.RuleDefault(
# name=POLICY_ROOT % 'show',
# check_str=base.RULE_ADMIN_API),
# policy.RuleDefault(
# name=POLICY_ROOT % 'update',
# check_str=base.RULE_ADMIN_API)
#]
def list_rules():
return licence_policies
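# Hedged usage sketch (not part of this module): rule lists returned by
# list_rules() are normally registered on an oslo.policy Enforcer; the
# variable names below are illustrative.
#
#   from oslo_policy import policy as oslo_policy
#   enforcer = oslo_policy.Enforcer(conf)
#   enforcer.register_defaults(list_rules())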
|
apache-2.0
| -5,257,845,597,965,646,000
| 29.04
| 78
| 0.675766
| false
| 3.39819
| false
| false
| false
|
srinath29/pyView
|
helper.py
|
1
|
2537
|
from PyQt4 import QtGui, uic, QtCore, Qt
import PyQt4
import pandas
import sys
import os
viewBase, viewForm = uic.loadUiType(os.path.join(os.path.dirname(os.path.realpath(__file__)),"view.ui"))
class Helper(viewBase, viewForm):
def __init__(self, parent = None):
super(viewBase,self).__init__(parent)
self.setupUi(self)
def __version__(self):
print("0.0.1")
def View(self, df):
"""
This is to view a data frame
:param df: pandas.Dataframe
:return: Qt object to view the data
"""
#df = pandas.DataFrame()
#self.table = QtGui.QTableWidget()
self.table.setColumnCount(len(df.columns))
self.table.setRowCount(len(df.index))
self.sortOrder = {}
for p in range(len(df.columns)):
self.sortOrder[p] = ""
for i in range(len(df.index)):
for j in range(len(df.columns)):
self.table.setItem(i,j, QtGui.QTableWidgetItem(str(df.iloc[i][j])))
for i in range(len(df.columns)):
self.table.setHorizontalHeaderItem(i,QtGui.QTableWidgetItem(str(df.columns[i])))
for i in range(len(df.index)):
self.table.setVerticalHeaderItem(i,QtGui.QTableWidgetItem(str(df.index[i])))
# for i in range(len(df.columns)):
# self.table.horizontalHeaderItem(i).setText(str(df.columns[i]))
self.horizHeader = self.table.horizontalHeader()
self.horizHeader.setSortIndicatorShown(True)
QtCore.QObject.connect(self.horizHeader, QtCore.SIGNAL("sectionClicked(int)"), self.sortByColumn)
self.show()
def sortByColumn(self,p):
#print(self.sortOrder[p])
        if self.sortOrder[p] in ("", "A"):
            self.horizHeader.setSortIndicator(p, Qt.Qt.DescendingOrder)
            self.table.sortByColumn(p, Qt.Qt.DescendingOrder)
            self.sortOrder[p] = "D"
        elif self.sortOrder[p] == "D":
            self.horizHeader.setSortIndicator(p, Qt.Qt.AscendingOrder)
            self.table.sortByColumn(p, Qt.Qt.AscendingOrder)
            self.sortOrder[p] = "A"
def View(df):
app = QtGui.QApplication(sys.argv)
app.setStyle("plastique")
h = Helper()
h.View(df)
h.show()
app.exec_()
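# Hedged usage sketch (illustrative only; assumes this file is importable as
# ``helper`` and that PyQt4 and pandas are installed):
#
#   import pandas
#   from helper import View
#   df = pandas.DataFrame({'x': [1, 2, 3], 'y': [4.0, 5.0, 6.0]})
#   View(df)   # opens a sortable table window built from view.ui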
|
gpl-3.0
| -1,201,301,448,810,805,500
| 29.7125
| 105
| 0.592826
| false
| 3.470588
| false
| false
| false
|
PietroPasotti/AutomatedTagger
|
analyzers.py
|
1
|
1493
|
#analyzers.py
from main import _export,test,x2,RESULTS,evaluation
def overlap(dic1,dic2):
    # set() over a dict takes its keys, over a list/tuple it takes all items
    summ = len(set(dic1).union(set(dic2)))
    # inclusion-exclusion: |A| + |B| - |A union B| = |A intersect B|, the overlap
    overlapOfPlaindics = len(set(dic1)) + len(set(dic2)) - summ
return overlapOfPlaindics
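# Tiny worked example of overlap() (illustrative data): with
# dic1 = {'a': 1, 'b': 2} and dic2 = ['b', 'c'] the union has 3 elements,
# so the overlap is 2 + 2 - 3 = 1 (only 'b' is shared).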
def unpackKWS_CO(COres):
kws = []
for CO in COres:
CO1,CO2 = CO
kws.extend([CO1,CO2])
return kws
def matcher(item1,item2):
val = 0
COres1,FRres1 = RESULTS[item1]
COres2,FRres2 = RESULTS[item2]
overlapCORES = overlap(COres1, COres2)
kws1 = unpackKWS_CO(COres1)
kws2 = unpackKWS_CO(COres2)
overlapKWS_fromCO = overlap(kws1,kws2) # overlapping co_occurrence-extracted words
overlapKWS_fromFR = overlap(FRres1,FRres2) # overlapping words extracted by frequency
weight = 1.65 # ?
weightedoverlapKWS = weight*overlapKWS_fromCO + overlapKWS_fromFR
return weightedoverlapKWS
def analyze(RESULTS,verbose):
for item in _export['items']:
test(item,verbose)
top_co_occurr = x2.HighestNumbers(5, x2.co_occurrences ,getvalues = True)
top_freq = x2.HighestNumbers(5, x2.words_count ,getvalues = True)
RESULTS[item] = (top_co_occurr,top_freq)
for elem in _export['items']:
for otherelem in _export['items']:
if elem != otherelem and frozenset({elem,otherelem}) not in evaluation:
evaluation[frozenset({elem,otherelem})] = 'n/a.'
for pair in evaluation:
A,B = pair
evaluation[pair] = matcher(A,B)
return evaluation
|
agpl-3.0
| 4,131,902,085,462,726,700
| 23.883333
| 87
| 0.698593
| false
| 2.680431
| false
| false
| false
|
KeyWeeUsr/plyer
|
plyer/compat.py
|
1
|
1120
|
'''
Compatibility module for Python 2.7 and > 3.3
=============================================
'''
# pylint: disable=invalid-name
__all__ = ('PY2', 'string_types', 'queue', 'iterkeys',
'itervalues', 'iteritems', 'xrange')
import sys
try:
import queue
except ImportError:
import Queue as queue
#: True if Python 2 intepreter is used
PY2 = sys.version_info[0] == 2
#: String types that can be used for checking if a object is a string
string_types = None
text_type = None
if PY2:
# pylint: disable=undefined-variable
# built-in actually, so it is defined in globals() for py2
string_types = basestring
text_type = unicode
else:
string_types = text_type = str
if PY2:
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
else:
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
if PY2:
# pylint: disable=undefined-variable
# built-in actually, so it is defined in globals() for py2
xrange = xrange
else:
xrange = range
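# Hedged usage sketch (illustrative; assumes plyer is importable): the helpers
# above give a single spelling that works on both interpreters.
#
#   from plyer.compat import PY2, iteritems, xrange
#   data = {'a': 1, 'b': 2}
#   for key, value in iteritems(data):   # lazy iteration on Py2 and Py3
#       print(key, value)
#   for i in xrange(3):                  # xrange on Py2, range on Py3
#       print(i)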
|
mit
| -9,110,793,413,409,874,000
| 24.454545
| 69
| 0.633929
| false
| 3.636364
| false
| false
| false
|
noironetworks/heat
|
heat/tests/openstack/keystone/test_service.py
|
1
|
10694
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.keystone import service
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
keystone_service_template = {
'heat_template_version': '2015-04-30',
'resources': {
'test_service': {
'type': 'OS::Keystone::Service',
'properties': {
'name': 'test_service_1',
'description': 'Test service',
'type': 'orchestration',
'enabled': False
}
}
}
}
class KeystoneServiceTest(common.HeatTestCase):
def setUp(self):
super(KeystoneServiceTest, self).setUp()
self.ctx = utils.dummy_context()
# Mock client
self.keystoneclient = mock.Mock()
self.patchobject(resource.Resource, 'client',
return_value=fake_ks.FakeKeystoneClient(
client=self.keystoneclient))
self.services = self.keystoneclient.services
# Mock client plugin
self.keystone_client_plugin = mock.MagicMock()
def _setup_service_resource(self, stack_name, use_default=False):
tmpl_data = copy.deepcopy(keystone_service_template)
if use_default:
props = tmpl_data['resources']['test_service']['properties']
del props['name']
del props['enabled']
del props['description']
test_stack = stack.Stack(
self.ctx, stack_name,
template.Template(tmpl_data)
)
r_service = test_stack['test_service']
r_service.client = mock.MagicMock()
r_service.client.return_value = self.keystoneclient
r_service.client_plugin = mock.MagicMock()
r_service.client_plugin.return_value = self.keystone_client_plugin
return r_service
def _get_mock_service(self):
value = mock.MagicMock()
value.id = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
return value
def test_service_handle_create(self):
rsrc = self._setup_service_resource('test_service_create')
mock_service = self._get_mock_service()
self.services.create.return_value = mock_service
# validate the properties
self.assertEqual(
'test_service_1',
rsrc.properties.get(service.KeystoneService.NAME))
self.assertEqual(
'Test service',
rsrc.properties.get(
service.KeystoneService.DESCRIPTION))
self.assertEqual(
'orchestration',
rsrc.properties.get(service.KeystoneService.TYPE))
self.assertFalse(rsrc.properties.get(
service.KeystoneService.ENABLED))
rsrc.handle_create()
# validate service creation
self.services.create.assert_called_once_with(
name='test_service_1',
description='Test service',
type='orchestration',
enabled=False)
# validate physical resource id
self.assertEqual(mock_service.id, rsrc.resource_id)
def test_service_handle_create_default(self):
rsrc = self._setup_service_resource('test_create_with_defaults',
use_default=True)
mock_service = self._get_mock_service()
self.services.create.return_value = mock_service
rsrc.physical_resource_name = mock.MagicMock()
rsrc.physical_resource_name.return_value = 'foo'
# validate the properties
self.assertIsNone(
rsrc.properties.get(service.KeystoneService.NAME))
self.assertIsNone(rsrc.properties.get(
service.KeystoneService.DESCRIPTION))
self.assertEqual(
'orchestration',
rsrc.properties.get(service.KeystoneService.TYPE))
self.assertTrue(rsrc.properties.get(service.KeystoneService.ENABLED))
rsrc.handle_create()
# validate service creation with physical resource name
self.services.create.assert_called_once_with(
name='foo',
description=None,
type='orchestration',
enabled=True)
def test_service_handle_update(self):
rsrc = self._setup_service_resource('test_update')
rsrc.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
prop_diff = {service.KeystoneService.NAME: 'test_service_1_updated',
service.KeystoneService.DESCRIPTION:
'Test Service updated',
service.KeystoneService.TYPE: 'heat_updated',
service.KeystoneService.ENABLED: False}
rsrc.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
self.services.update.assert_called_once_with(
service=rsrc.resource_id,
name=prop_diff[service.KeystoneService.NAME],
description=prop_diff[service.KeystoneService.DESCRIPTION],
type=prop_diff[service.KeystoneService.TYPE],
enabled=prop_diff[service.KeystoneService.ENABLED]
)
def test_service_handle_update_default_name(self):
rsrc = self._setup_service_resource('test_update_default_name')
rsrc.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
rsrc.physical_resource_name = mock.MagicMock()
rsrc.physical_resource_name.return_value = 'foo'
# Name is reset to None, so default to physical resource name
prop_diff = {service.KeystoneService.NAME: None}
rsrc.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
# validate default name to physical resource name
self.services.update.assert_called_once_with(
service=rsrc.resource_id,
name='foo',
type=None,
description=None,
enabled=None
)
def test_service_handle_update_only_enabled(self):
rsrc = self._setup_service_resource('test_update_enabled_only')
rsrc.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
prop_diff = {service.KeystoneService.ENABLED: False}
rsrc.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
self.services.update.assert_called_once_with(
service=rsrc.resource_id,
name=None,
description=None,
type=None,
enabled=prop_diff[service.KeystoneService.ENABLED]
)
def test_properties_title(self):
property_title_map = {
service.KeystoneService.NAME: 'name',
service.KeystoneService.DESCRIPTION: 'description',
service.KeystoneService.TYPE: 'type',
service.KeystoneService.ENABLED: 'enabled'
}
for actual_title, expected_title in property_title_map.items():
self.assertEqual(
expected_title,
actual_title,
'KeystoneService PROPERTIES(%s) title modified.' %
actual_title)
def test_property_name_validate_schema(self):
schema = service.KeystoneService.properties_schema[
service.KeystoneService.NAME]
self.assertTrue(
schema.update_allowed,
'update_allowed for property %s is modified' %
service.KeystoneService.NAME)
self.assertEqual(properties.Schema.STRING,
schema.type,
'type for property %s is modified' %
service.KeystoneService.NAME)
self.assertEqual('Name of keystone service.',
schema.description,
'description for property %s is modified' %
service.KeystoneService.NAME)
def test_property_description_validate_schema(self):
schema = service.KeystoneService.properties_schema[
service.KeystoneService.DESCRIPTION]
self.assertTrue(
schema.update_allowed,
'update_allowed for property %s is modified' %
service.KeystoneService.DESCRIPTION)
self.assertEqual(properties.Schema.STRING,
schema.type,
'type for property %s is modified' %
service.KeystoneService.DESCRIPTION)
self.assertEqual('Description of keystone service.',
schema.description,
'description for property %s is modified' %
service.KeystoneService.DESCRIPTION)
def test_property_type_validate_schema(self):
schema = service.KeystoneService.properties_schema[
service.KeystoneService.TYPE]
self.assertTrue(
schema.update_allowed,
'update_allowed for property %s is modified' %
service.KeystoneService.TYPE)
self.assertTrue(
schema.required,
'required for property %s is modified' %
service.KeystoneService.TYPE)
self.assertEqual(properties.Schema.STRING,
schema.type,
'type for property %s is modified' %
service.KeystoneService.TYPE)
self.assertEqual('Type of keystone Service.',
schema.description,
'description for property %s is modified' %
service.KeystoneService.TYPE)
def test_show_resource(self):
rsrc = self._setup_service_resource('test_show_resource')
moc_service = mock.Mock()
moc_service.to_dict.return_value = {'attr': 'val'}
self.services.get.return_value = moc_service
attributes = rsrc._show_resource()
self.assertEqual({'attr': 'val'}, attributes)
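# Hedged note: this module follows heat's standard unit-test layout; under a
# checked-out heat tree with its test tooling installed it would typically be
# run with something like
#   stestr run heat.tests.openstack.keystone.test_service
# (the exact tox/stestr invocation is project-specific and not shown here).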
|
apache-2.0
| -5,912,710,049,060,024,000
| 36.391608
| 78
| 0.601085
| false
| 4.35956
| true
| false
| false
|
iulian787/spack
|
var/spack/repos/builtin/packages/ppopen-appl-fdm-at/package.py
|
2
|
1685
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PpopenApplFdmAt(MakefilePackage):
"""ppOpen-APPL/FDM with Auto-Tuning"""
homepage = "http://ppopenhpc.cc.u-tokyo.ac.jp/ppopenhpc/"
git = "https://github.com/Post-Peta-Crest/ppOpenHPC.git"
version('master', branch='ATA/FDM')
depends_on('mpi')
# depends_on('ppopen-appl-fdm', type='build')
build_directory = "3.hybrid_AT"
parallel = False
def edit(self, spec, prefix):
with working_dir(self.build_directory):
fflags = ['-O3', self.compiler.openmp_flag]
if spec.satisfies('%gcc'):
fflags.append('-ffree-line-length-none')
if spec.satisfies('arch=x86_64:'):
fflags.append('-mcmodel=medium')
makefile_opt = FileFilter('Makefile.option')
makefile_opt.filter(
'FC = .*$',
'FC = {0}'.format(spec['mpi'].mpifc)
)
makefile_opt.filter(
'FFLAGS = .*$',
'FFLAGS = -O3 {0}'.format(' '.join(fflags))
)
def install(self, spec, prefix):
mkdir(prefix.bin)
copy(join_path(self.build_directory, 'seism3d3n'), prefix.bin)
install_src_dir = join_path(prefix.src, self.build_directory)
mkdirp(install_src_dir)
install_tree(self.build_directory, install_src_dir)
with working_dir(install_src_dir):
make('clean')
mkdir(prefix.doc)
copy('readme.txt', prefix.doc)
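# Hedged usage sketch (assumes a working Spack installation that carries this
# builtin package):
#   spack install ppopen-appl-fdm-at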
|
lgpl-2.1
| 2,089,136,601,130,362,000
| 33.387755
| 73
| 0.587537
| false
| 3.431772
| false
| false
| false
|