| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
anomaly/prestans | prestans/rest/response.py | 1 | 13386 |
import webob
from prestans import exception
from prestans.http import STATUS
from prestans.parser import AttributeFilter
from prestans import serializer
from prestans.types import Array
from prestans.types import BinaryResponse
from prestans.types import DataCollection
from prestans.types import Model
class Response(webob.Response):
"""
Response is the writable HTTP response. It inherits from webob.Response,
which does the heavy lifting of HTTP responses, and adds prestans
customisations on top.
Overrides the content_type property to use prestans' serializers with the set body.
"""
def __init__(self, charset, logger, serializers, default_serializer):
super(Response, self).__init__()
self._logger = logger
self._serializers = serializers
self._default_serializer = default_serializer
self._selected_serializer = None
self._template = None
self._app_iter = []
self._minify = False
self._attribute_filter = None
self._template = None
self._charset = charset
#:
#: IETF hash dropped the X- prefix for custom headers
#: http://stackoverflow.com/q/3561381
#: http://tools.ietf.org/html/draft-saintandre-xdash-00
#:
from prestans import __version__ as version
if not isinstance(version, str):
version = version.encode("latin1")
self.headers.add('Prestans-Version', version)
@property
def minify(self):
return self._minify
@minify.setter
def minify(self, value):
self._minify = value
@property
def logger(self):
return self._logger
@property
def supported_mime_types(self):
return [serializer.content_type() for serializer in self._serializers]
@property
def supported_mime_types_str(self):
return ''.join(str(mime_type) + ',' for mime_type in self.supported_mime_types)[:-1]
@property
def selected_serializer(self):
return self._selected_serializer
@property
def default_serializer(self):
return self._default_serializer
def _set_serializer_by_mime_type(self, mime_type):
"""
:param mime_type:
:return:
used by the content_type setter to get a reference to the appropriate serializer
"""
# ignore if binary response
if isinstance(self._app_iter, BinaryResponse):
self.logger.info("ignoring setting serializer for binary response")
return
for available_serializer in self._serializers:
if available_serializer.content_type() == mime_type:
self._selected_serializer = available_serializer
self.logger.info("set serializer for mime type: %s" % mime_type)
return
self.logger.info("could not find serializer for mime type: %s" % mime_type)
raise exception.UnsupportedVocabularyError(mime_type, self.supported_mime_types_str)
@property
def template(self):
"""
is an instance of prestans.types.DataType; mostly a subclass of prestans.types.Model
"""
return self._template
@template.setter
def template(self, value):
if value is not None and (not isinstance(value, DataCollection) and
not isinstance(value, BinaryResponse)):
raise TypeError("template in response must be of type prestans.types.DataCollection or subclass")
self._template = value
#:
#: Attribute filter setup
#:
@property
def attribute_filter(self):
return self._attribute_filter
@attribute_filter.setter
def attribute_filter(self, value):
if value is not None and not isinstance(value, AttributeFilter):
msg = "attribute_filter in response must be of type prestans.types.AttributeFilter"
raise TypeError(msg)
self._attribute_filter = value
def _content_type__get(self):
"""
Get/set the Content-Type header (or None), *without* the
charset or any parameters.
If you include parameters (or ``;`` at all) when setting the
content_type, any existing parameters will be deleted;
otherwise they will be preserved.
"""
header = self.headers.get('Content-Type')
if not header:
return None
return header.split(';', 1)[0]
def _content_type__set(self, value):
# skip for responses that have no body
if self.status_code in [STATUS.NO_CONTENT, STATUS.PERMANENT_REDIRECT, STATUS.TEMPORARY_REDIRECT]:
self.logger.info("attempt to set Content-Type to %s being ignored due to empty response" % value)
self._content_type__del()
else:
self._set_serializer_by_mime_type(value)
if ';' not in value:
header = self.headers.get('Content-Type', '')
if ';' in header:
params = header.split(';', 1)[1]
value += ';' + params
self.headers['Content-Type'] = value
self.logger.info("Content-Type set to: %s" % value)
def _content_type__del(self):
self.headers.pop('Content-Type', None)
# content_type; overrides webob.Response line 606
content_type = property(
_content_type__get,
_content_type__set,
_content_type__del,
doc=_content_type__get.__doc__
)
# body; overrides webob.Response line 324
@property
def body(self):
"""
Overridden response does not support md5, text or json properties. _app_iter
is set using rules defined by prestans.
body getter will return the validated prestans model.
webob does the heavy lifting with headers.
"""
#: If template is null; return an empty iterable
if self.template is None:
return []
return self._app_iter
@body.setter
def body(self, value):
#: If there is no response template, we have to assume it's NO_CONTENT
#: hence do not allow setting the body
if self.template is None:
raise AssertionError("response_template is None; handler can't return a response")
#: value should be a subclass prestans.types.DataCollection
if not isinstance(value, DataCollection) and \
not isinstance(value, BinaryResponse):
msg = "%s is not a prestans.types.DataCollection or prestans.types.BinaryResponse subclass" % (
value.__class__.__name__
)
raise TypeError(msg)
#: Ensure that it matches the return type template
if not value.__class__ == self.template.__class__:
msg = "body must of be type %s, given %s" % (
self.template.__class__.__name__,
value.__class__.__name__
)
raise TypeError(msg)
#: If it's an array then ensure that element_template matches up
if isinstance(self.template, Array) and \
not isinstance(value.element_template, self.template.element_template.__class__):
msg = "array elements must of be type %s, given %s" % (
self.template.element_template.__class__.__name__,
value.element_template.__class__.__name__
)
raise TypeError(msg)
#: _app_iter assigned to value
#: we need to serialize the contents before we know the length
#: defer setting the content_length property to the getter
self._app_iter = value
# body = property(_body__get, _body__set, _body__set)
def register_serializers(self, serializers):
"""
Adds extra serializers; generally registered during the handler lifecycle
"""
for new_serializer in serializers:
if not isinstance(new_serializer, serializer.Base):
msg = "registered serializer %s.%s does not inherit from prestans.serializer.Serializer" % (
new_serializer.__module__,
new_serializer.__class__.__name__
)
raise TypeError(msg)
self._serializers = self._serializers + serializers
def __call__(self, environ, start_response):
"""
Overridden WSGI application interface
"""
# prestans equivalent of webob.Response line 1022
if self.template is None or self.status_code == STATUS.NO_CONTENT:
self.content_type = None
start_response(self.status, self.headerlist)
if self.template is not None:
self.logger.warn("handler returns No Content but has a response_template; set template to None")
return []
# ensure what we are able to serialize is serializable
if not isinstance(self._app_iter, DataCollection) and \
not isinstance(self._app_iter, BinaryResponse):
if isinstance(self._app_iter, list):
app_iter_type = "list"
else:
app_iter_type = self._app_iter.__class__.__name__
msg = "handler returns content of type %s; not a prestans.types.DataCollection subclass" % (
app_iter_type
)
raise TypeError(msg)
if isinstance(self._app_iter, DataCollection):
#: See if attribute filter is completely invisible
if self.attribute_filter is not None:
#: Warning to say nothing is visible
if not self.attribute_filter.are_any_attributes_visible():
self.logger.warn("attribute_filter has all the attributes turned \
off, handler will return an empty response")
#: Warning to say none of the fields match
model_attribute_filter = None
if isinstance(self._app_iter, Array):
model_attribute_filter = AttributeFilter. \
from_model(self._app_iter.element_template)
elif isinstance(self._app_iter, Model):
model_attribute_filter = AttributeFilter. \
from_model(self._app_iter)
if model_attribute_filter is not None:
try:
model_attribute_filter.conforms_to_template_filter(self.attribute_filter)
except exception.AttributeFilterDiffers as exp:
exp.request = self.request
self.logger.warn("%s" % exp)
# body should be of type DataCollection try; attempt calling
# as_serializable with available attribute_filter
serializable_body = self._app_iter.as_serializable(self.attribute_filter.as_immutable(), self.minify)
#: attempt serializing via registered serializer
stringified_body = self._selected_serializer.dumps(serializable_body)
# if not isinstance(stringified_body, str):
# msg = "%s dumps must return a python str not %s" % (
# self._selected_serializer.__class__.__name__,
# stringified_body.__class__.__name__
# )
# raise TypeError(msg)
#: set content_length
self.content_length = len(stringified_body)
start_response(self.status, self.headerlist)
return [stringified_body.encode("utf-8")]
elif isinstance(self._app_iter, BinaryResponse):
if self._app_iter.content_length == 0 or \
self._app_iter.mime_type is None or \
self._app_iter.file_name is None:
msg = "Failed to write binary response with content_length %i; mime_type %s; file_name %s" % (
self._app_iter.content_length,
self._app_iter.mime_type,
self._app_iter.file_name
)
self.logger.warn(msg)
self.status = STATUS.INTERNAL_SERVER_ERROR
self.content_type = "text/plain"
return []
# set the content type
self.content_type = self._app_iter.mime_type
#: Add content disposition header
if self._app_iter.as_attachment:
attachment = "attachment; filename=\"%s\"" % self._app_iter.file_name
if not isinstance(attachment, str):
attachment = attachment.encode("latin1")
self.headers.add("Content-Disposition", attachment)
else:
inline = "inline; filename=\"%s\"" % self._app_iter.file_name
if not isinstance(inline, str):
inline = inline.encode("latin1")
self.headers.add("Content-Disposition", inline)
#: Write out response
self.content_length = self._app_iter.content_length
start_response(self.status, self.headerlist)
return [self._app_iter.contents]
else:
raise AssertionError("prestans failed to write a binary or textual response")
def __str__(self):
#: Overridden so webob's __str__ skips serializing the body
return super(Response, self).__str__(skip_body=True)
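# A hedged construction sketch (not part of the original module): Response is
# normally instantiated by the prestans framework itself; assuming a JSON
# serializer is available as prestans.serializer.JSON, wiring it up manually
# would look roughly like this:
#
#   json_serializer = serializer.JSON()
#   response = Response(charset="utf-8",
#                       logger=logging.getLogger("prestans"),
#                       serializers=[json_serializer],
#                       default_serializer=json_serializer)
#   response.template = MyModel()  # MyModel is a hypothetical prestans.types.Model subclass
#   response.body = MyModel()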
| bsd-3-clause | -3,160,285,723,558,265,000 | 35.377717 | 113 | 0.590318 | false | 4.57016 | false | false | false |
DataViva/dataviva-scripts | scripts/comtrade/helpers/calc_rca.py | 1 | 1782 |
import sys, os
import pandas as pd
import numpy as np
file_path = os.path.dirname(os.path.realpath(__file__))
ps_calcs_lib_path = os.path.abspath(os.path.join(file_path, "../../../lib/ps_calcs"))
sys.path.insert(0, ps_calcs_lib_path)
import ps_calcs
def calc_rca(ypw):
ubiquity_required = 20
diversity_required = 200
total_exports_required = 50000000
'''trim country list by diversity'''
origin_diversity = ypw.reset_index()
origin_diversity = origin_diversity["wld_id"].value_counts()
origin_diversity = origin_diversity[origin_diversity > diversity_required]
'''trim country list by total exports'''
origin_totals = ypw.groupby(level=['wld_id']).sum()
origin_totals = origin_totals['val_usd']
origin_totals = origin_totals[origin_totals > total_exports_required]
filtered_origins = set(origin_diversity.index).intersection(set(origin_totals.index))
'''trim product list by ubiquity'''
product_ubiquity = ypw.reset_index()
product_ubiquity = product_ubiquity[product_ubiquity['val_usd'] > 0]
product_ubiquity = product_ubiquity["hs_id"].value_counts()
product_ubiquity = product_ubiquity[product_ubiquity > ubiquity_required]
filtered_products = set(product_ubiquity.index)
'''re-calculate rcas'''
origins_to_drop = set(ypw.index.get_level_values('wld_id')).difference(filtered_origins)
products_to_drop = set(ypw.index.get_level_values('hs_id')).difference(filtered_products)
ypw = ypw.drop(list(origins_to_drop), axis=0, level='wld_id')
ypw = ypw.drop(list(products_to_drop), axis=0, level='hs_id')
ypw_rca = ypw.reset_index()
ypw_rca = ypw_rca.pivot(index="wld_id", columns="hs_id", values="val_usd")
ypw_rca = ps_calcs.rca(ypw_rca)
return ypw_rca.fillna(0)
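# A hedged usage sketch (not part of the original script): calc_rca expects a
# DataFrame indexed by the levels ('wld_id', 'hs_id') with a 'val_usd' column.
# The toy values below only illustrate the expected shape; real inputs need to
# pass the diversity, ubiquity and total-export thresholds defined above.
#
#   idx = pd.MultiIndex.from_tuples([('usa', 1001), ('usa', 1002),
#                                    ('bra', 1001), ('bra', 1002)],
#                                   names=['wld_id', 'hs_id'])
#   ypw = pd.DataFrame({'val_usd': [10.0, 20.0, 30.0, 40.0]}, index=idx)
#   rcas = calc_rca(ypw)  # origin x product matrix of RCA values, NaNs filled with 0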
| mit | 7,757,585,191,492,210,000 | 36.914894 | 93 | 0.69248 | false | 2.874194 | false | false | false |
copyninja/apt-offline | apt_offline_core/AptOfflineDebianBtsLib.py | 1 | 10168 |
#!/usr/bin/env python
# debianbts.py - Methods to query Debian's BTS.
# Copyright (C) 2007-2010 Bastian Venthur <venthur@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Query Debian's Bug Tracking System (BTS).
This module provides a layer between Python and Debian's BTS. It provides
methods to query the BTS using the BTS' SOAP interface, and the Bugreport class
which represents a bugreport from the BTS.
"""
from datetime import datetime
import urllib
import urlparse
import SOAPpy
# Setup the soap server
# Default values
URL = 'http://bugs.debian.org/cgi-bin/soap.cgi'
NS = 'Debbugs/SOAP/V1'
BTS_URL = 'http://bugs.debian.org/'
def _get_http_proxy():
"""Returns an HTTP proxy URL formatted for consumption by SOAPpy.
SOAPpy does some fairly low-level HTTP manipulation and needs to be
explicitly made aware of HTTP proxy URLs, which also have to be
formatted without a schema or path.
"""
http_proxy = urllib.getproxies().get('http')
if http_proxy is None:
return None
return urlparse.urlparse(http_proxy).netloc
server = SOAPpy.SOAPProxy(URL, NS, http_proxy=_get_http_proxy())
class Bugreport(object):
"""Represents a bugreport from Debian's Bug Tracking System.
A bugreport object provides all attributes provided by the SOAP interface.
Most of the attributes are strings, the others are marked.
* bug_num: The bugnumber (int)
* severity: Severity of the bugreport
* tags: List of tags of the bugreport (list of strings)
* subject: The subject/title of the bugreport
* originator: Submitter of the bugreport
* mergedwith: List of bugnumbers this bug was merged with (list of ints)
* package: Package of the bugreport
* source: Source package of the bugreport
* date: Date of bug creation (datetime)
* log_modified: Date of update of the bugreport (datetime)
* done: Is the bug fixed or not (bool)
* archived: Is the bug archived or not (bool)
* unarchived: Was the bug unarchived or not (bool)
* fixed_versions: List of versions, can be empty even if bug is fixed (list of strings)
* found_versions: List of version numbers where bug was found (list of strings)
* forwarded: A URL or email address
* blocks: List of bugnumbers this bug blocks (list of ints)
* blockedby: List of bugnumbers which block this bug (list of ints)
* pending: Either 'pending' or 'done'
* msgid: Message ID of the bugreport
* owner: Who took responsibility for fixing this bug
* location: Either 'db-h' or 'archive'
* affects: List of Packagenames (list of strings)
* summary: Arbitrary text
"""
def __init__(self):
self.originator = None
self.date = None
self.subject = None
self.msgid = None
self.package = None
self.tags = None
self.done = None
self.forwarded = None
self.mergedwith = None
self.severity = None
self.owner = None
self.found_versions = None
self.fixed_versions = None
self.blocks = None
self.blockedby = None
self.unarchived = None
self.summary = None
self.affects = None
self.log_modified = None
self.location = None
self.archived = None
self.bug_num = None
self.source = None
self.pending = None
# The ones below are also there but not used
#self.fixed = None
#self.found = None
#self.fixed_date = None
#self.found_date = None
#self.keywords = None
#self.id = None
def __str__(self):
s = ""
for key, value in self.__dict__.iteritems():
if type(value) == type(unicode()):
value = value.encode('utf-8')
s += "%s: %s\n" % (key, str(value))
return s
def __cmp__(self, other):
"""Compare a bugreport with another.
The more open and urgent a bug is, the greater the bug is:
outstanding > resolved > archived
critical > grave > serious > important > normal > minor > wishlist.
Openness always beats urgency, eg an archived bug is *always* smaller
than an outstanding bug.
This sorting is useful for displaying bugreports in a list and sorting
them in a useful way.
"""
myval = self._get_value()
otherval = other._get_value()
if myval < otherval:
return -1
elif myval == otherval:
return 0
else:
return 1
def _get_value(self):
if self.archived:
# archived and done
val = 0
elif self.done:
# not archived and done
val = 10
else:
# not done
val = 20
val += {u"critical" : 7,
u"grave" : 6,
u"serious" : 5,
u"important" : 4,
u"normal" : 3,
u"minor" : 2,
u"wishlist" : 1}[self.severity]
return val
def get_status(*nr):
"""Returns a list of Bugreport objects."""
reply = server.get_status(*nr)
# If we called get_status with one single bug, we get a single bug,
# if we called it with a list of bugs, we get a list,
# if no bugreports are available an empty list is returned
bugs = []
if not reply:
pass
elif type(reply[0]) == type([]):
for elem in reply[0]:
bugs.append(_parse_status(elem))
else:
bugs.append(_parse_status(reply[0]))
return bugs
def get_usertag(email, *tags):
"""Return a dictionary of "usertag" => buglist mappings.
If tags are given the dictionary is limited to the matching tags, if no
tags are given all available tags are returned.
"""
reply = server.get_usertag(email, *tags)
# reply is an empty string if no bugs match the query
return dict() if reply == "" else reply._asdict()
def get_bug_log(nr):
"""Return a list of Buglogs.
A buglog is a dictionary with the following mappings:
"header" => string
"body" => string
"attachments" => list
"msg_num" => int
"""
reply = server.get_bug_log(nr)
buglog = [i._asdict() for i in reply._aslist()]
for b in buglog:
b["header"] = _uc(b["header"])
b["body"] = _uc(b["body"])
b["msg_num"] = int(b["msg_num"])
b["attachments"] = b["attachments"]._aslist()
return buglog
def newest_bugs(amount):
"""Returns a list of bugnumbers of the `amount` newest bugs."""
reply = server.newest_bugs(amount)
return reply._aslist()
def get_bugs(*key_value):
"""Returns a list of bugnumbers, that match the conditions given by the
key-value pair(s).
Possible keys are:
"package": bugs for the given package
"submitter": bugs from the submitter
"maint": bugs belonging to a maintainer
"src": bugs belonging to a source package
"severity": bugs with a certain severity
"status": can be either "done", "forwarded", or "open"
"tag": see http://www.debian.org/Bugs/Developer#tags for available tags
"owner": bugs which are assigned to `owner`
"bugs": takes list of bugnumbers, filters the list according to given criteria
"correspondent": bugs where `correspondent` has sent a mail to
Example: get_bugs('package', 'gtk-qt-engine', 'severity', 'normal')
"""
reply = server.get_bugs(*key_value)
return reply._aslist()
def _parse_status(status):
"""Return a bugreport object from a given status."""
status = status._asdict()
bug = Bugreport()
tmp = status['value']
bug.originator = _uc(tmp['originator'])
bug.date = datetime.utcfromtimestamp(tmp['date'])
bug.subject = _uc(tmp['subject'])
bug.msgid = _uc(tmp['msgid'])
bug.package = _uc(tmp['package'])
bug.tags = _uc(tmp['tags']).split()
bug.done = bool(tmp['done'])
bug.forwarded = _uc(tmp['forwarded'])
bug.mergedwith = [int(i) for i in str(tmp['mergedwith']).split()]
bug.severity = _uc(tmp['severity'])
bug.owner = _uc(tmp['owner'])
bug.found_versions = [_uc(str(i)) for i in tmp['found_versions']]
bug.fixed_versions = [_uc(str(i)) for i in tmp['fixed_versions']]
bug.blocks = [int(i) for i in str(tmp['blocks']).split()]
bug.blockedby = [int(i) for i in str(tmp['blockedby']).split()]
bug.unarchived = bool(tmp["unarchived"])
bug.summary = _uc(tmp['summary'])
affects = tmp['affects'].strip()
bug.affects = [_uc(i.strip()) for i in affects.split(',')] if affects else []
bug.log_modified = datetime.utcfromtimestamp(tmp['log_modified'])
bug.location = _uc(tmp['location'])
bug.archived = bool(tmp["archived"])
bug.bug_num = int(tmp['bug_num'])
bug.source = _uc(tmp['source'])
bug.pending = _uc(tmp['pending'])
# Also available, but unused or broken
#bug.fixed = _parse_crappy_soap(tmp, "fixed")
#bug.found = _parse_crappy_soap(tmp, "found")
#bug.found_date = [datetime.utcfromtimestamp(i) for i in tmp["found_date"]]
#bug.fixed_date = [datetime.utcfromtimestamp(i) for i in tmp["fixed_date"]]
#bug.keywords = _uc(tmp['keywords']).split()
#bug.id = int(tmp['id'])
return bug
def _uc(string):
"""Convert string to unicode.
This method only exists to unify the unicode conversion in this module.
"""
return unicode(string, 'utf-8', 'replace')
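# A hedged usage sketch (not part of the original module): querying the BTS for
# a single bug and listing the newest bug numbers. This needs network access to
# the SOAP endpoint configured in URL above.
#
#   bugs = get_status(123456)
#   if bugs:
#       print bugs[0].subject, bugs[0].severity
#   print newest_bugs(5)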
| gpl-3.0 | 842,525,302,459,896,700 | 33.006689 | 91 | 0.624803 | false | 3.747881 | false | false | false |
almorel/lab | python/pycuda/main.py | 1 | 7262 |
#!/usr/bin/env python
# Mandelbrot calculate using GPU, Serial numpy and faster numpy
# Use to show the speed difference between CPU and GPU calculations
# ian@ianozsvald.com July 2010
# Based on vegaseat's TKinter/numpy example code from 2006
# http://www.daniweb.com/code/snippet216851.html#
# with minor changes to move to numpy from the obsolete Numeric
import sys
import numpy as nm
import Tkinter as tk
import Image # PIL
import ImageTk # PIL
import pycuda.driver as drv
import pycuda.tools
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
# set width and height of window, more pixels take longer to calculate
w = 1000
h = 1000
from pycuda.elementwise import ElementwiseKernel
complex_gpu = ElementwiseKernel(
"pycuda::complex<float> *z, pycuda::complex<float> *q, int *iteration, int maxiter",
"for (int n=0; n < maxiter; n++) {z[i] = (z[i]*z[i])+q[i]; if (abs(z[i]) > 2.0f) {iteration[i]=n; z[i] = pycuda::complex<float>(); q[i] = pycuda::complex<float>();};}",
"complex5",
preamble="#include <pycuda-complex.hpp>",)
def calculate_z_gpu(q, maxiter, z):
output = nm.resize(nm.array(0,), q.shape)
q_gpu = gpuarray.to_gpu(q.astype(nm.complex64))
z_gpu = gpuarray.to_gpu(z.astype(nm.complex64))
iterations_gpu = gpuarray.to_gpu(output)
# the for loop and complex calculations are all done on the GPU
# we bring the iterations_gpu array back to determine pixel colours later
complex_gpu(z_gpu, q_gpu, iterations_gpu, maxiter)
iterations = iterations_gpu.get()
return iterations
def calculate_z_numpy_gpu(q, maxiter, z):
"""Calculate z using numpy on the GPU via gpuarray"""
outputg = gpuarray.to_gpu(nm.resize(nm.array(0,), q.shape).astype(nm.int32))
zg = gpuarray.to_gpu(z.astype(nm.complex64))
qg = gpuarray.to_gpu(q.astype(nm.complex64))
# 2.0 as an array
twosg = gpuarray.to_gpu(nm.array([2.0]*zg.size).astype(nm.float32))
# 0+0j as an array
cmplx0sg = gpuarray.to_gpu(nm.array([0+0j]*zg.size).astype(nm.complex64))
# for abs_zg > twosg result
comparison_result = gpuarray.to_gpu(nm.array([False]*zg.size).astype(nm.bool))
# we'll add 1 to iterg after each iteration
iterg = gpuarray.to_gpu(nm.array([0]*zg.size).astype(nm.int32))
for iter in range(maxiter):
zg = zg*zg + qg
# abs returns a complex (rather than a float) from the complex
# input where the real component is the absolute value (which
# looks like a bug) so I take the .real after abs()
abs_zg = abs(zg).real
comparison_result = abs_zg > twosg
qg = gpuarray.if_positive(comparison_result, cmplx0sg, qg)
zg = gpuarray.if_positive(comparison_result, cmplx0sg, zg)
outputg = gpuarray.if_positive(comparison_result, iterg, outputg)
iterg = iterg + 1
output = outputg.get()
return output
def calculate_z_numpy(q, maxiter, z):
# calculate z using numpy, this is the original
# routine from vegaseat's URL
# NOTE this routine was faster using a default of double-precision complex numbers
# rather than the current single precision
output = nm.resize(nm.array(0,), q.shape).astype(nm.int32)
for iter in range(maxiter):
z = z*z + q
done = nm.greater(abs(z), 2.0)
q = nm.where(done,0+0j, q)
z = nm.where(done,0+0j, z)
output = nm.where(done, iter, output)
return output
def calculate_z_serial(q, maxiter, z):
# calculate z using pure python with numpy arrays
# this routine unrolls calculate_z_numpy as an intermediate
# step to the creation of calculate_z_gpu
# it runs slower than calculate_z_numpy
output = nm.resize(nm.array(0,), q.shape).astype(nm.int32)
for i in range(len(q)):
if i % 100 == 0:
# print out some progress info since it is so slow...
print "%0.2f%% complete" % (1.0/len(q) * i * 100)
for iter in range(maxiter):
z[i] = z[i]*z[i] + q[i]
if abs(z[i]) > 2.0:
q[i] = 0+0j
z[i] = 0+0j
output[i] = iter
return output
show_instructions = False
if len(sys.argv) == 1:
show_instructions = True
if len(sys.argv) > 1:
if sys.argv[1] not in ['gpu', 'gpuarray', 'numpy', 'python']:
show_instructions = True
if show_instructions:
print "Usage: python mandelbrot.py [gpu|gpuarray|numpy|python]"
print "Where:"
print " gpu is a pure CUDA solution on the GPU"
print " gpuarray uses a numpy-like CUDA wrapper in Python on the GPU"
print " numpy is a pure Numpy (C-based) solution on the CPU"
print " python is a pure Python solution on the CPU with numpy arrays"
sys.exit(0)
routine = {'gpuarray':calculate_z_numpy_gpu,
'gpu':calculate_z_gpu,
'numpy':calculate_z_numpy,
'python':calculate_z_serial}
calculate_z = routine[sys.argv[1]]
##if sys.argv[1] == 'python':
# import psyco
# psyco.full()
# Using a WinXP Intel Core2 Duo 2.66GHz CPU (1 CPU used)
# with a 9800GT GPU I get the following timings (smaller is better).
# With 200x200 problem with max iterations set at 300:
# calculate_z_gpu: 0.03s
# calculate_z_serial: 8.7s
# calculate_z_numpy: 0.3s
#
# Using WinXP Intel 2.9GHz CPU (1 CPU used)
# with a GTX 480 GPU I get the following using 1000x1000 plot with 1000 max iterations:
# gpu: 0.07s
# gpuarray: 3.4s
# numpy: 43.4s
# python (serial): 1605.6s
class Mandelbrot(object):
def __init__(self):
# create window
self.root = tk.Tk()
self.root.title("Mandelbrot Set")
self.create_image()
self.create_label()
# start event loop
self.root.mainloop()
def draw(self, x1, x2, y1, y2, maxiter=300):
# draw the Mandelbrot set, from numpy example
xx = nm.arange(x1, x2, (x2-x1)/w*2)
yy = nm.arange(y2, y1, (y1-y2)/h*2) * 1j
# force yy, q and z to use 32 bit floats rather than
# the default 64 doubles for nm.complex for consistency with CUDA
yy = yy.astype(nm.complex64)
q = nm.ravel(xx+yy[:, nm.newaxis]).astype(nm.complex64)
z = nm.zeros(q.shape, nm.complex64)
start_main = drv.Event()
end_main = drv.Event()
start_main.record()
output = calculate_z(q, maxiter, z)
end_main.record()
end_main.synchronize()
secs = start_main.time_till(end_main)*1e-3
print "Main took", secs
output = (output + (256*output) + (256**2)*output) * 8
# convert output to a string
self.mandel = output.tostring()
def create_image(self):
""""
create the image from the draw() string
"""
self.im = Image.new("RGB", (w/2, h/2))
# you can experiment with these x and y ranges
self.draw(-2.13, 0.77, -1.3, 1.3, 1000)
self.im.fromstring(self.mandel, "raw", "RGBX", 0, -1)
def create_label(self):
# put the image on a label widget
self.image = ImageTk.PhotoImage(self.im)
self.label = tk.Label(self.root, image=self.image)
self.label.pack()
# test the class
if __name__ == '__main__':
test = Mandelbrot()
| gpl-3.0 | -6,831,566,313,177,774,000 | 34.42439 | 180 | 0.631644 | false | 3.171179 | false | false | false |
pbanaszkiewicz/amy | amy/workshops/management/commands/instructors_activity.py | 1 | 5300 |
import logging
import os
from django.core.mail import send_mail
from django.core.management.base import BaseCommand
from django.template.loader import get_template
from workshops.models import Badge, Person, Role
logger = logging.getLogger()
class Command(BaseCommand):
help = "Report instructors activity."
def add_arguments(self, parser):
parser.add_argument(
"--send-out-for-real",
action="store_true",
default=False,
help="Send information to the instructors.",
)
parser.add_argument(
"--no-may-contact-only",
action="store_true",
default=False,
help="Include instructors not willing to be contacted.",
)
parser.add_argument(
"--django-mailing",
action="store_true",
default=False,
help="Use Django mailing system. This requires some environmental "
"variables to be set, see `settings.py`.",
)
parser.add_argument(
"-s",
"--sender",
action="store",
default="workshops@carpentries.org",
help='E-mail used in "from:" field.',
)
def foreign_tasks(self, tasks, person, roles):
"""List of other instructors' tasks, per event."""
return [
task.event.task_set.filter(role__in=roles)
.exclude(person=person)
.select_related("person")
for task in tasks
]
def fetch_activity(self, may_contact_only=True):
roles = Role.objects.filter(name__in=["instructor", "helper"])
instructor_badges = Badge.objects.instructor_badges()
instructors = Person.objects.filter(badges__in=instructor_badges)
instructors = instructors.exclude(email__isnull=True)
if may_contact_only:
instructors = instructors.exclude(may_contact=False)
# let's get some things faster
instructors = instructors.select_related("airport").prefetch_related(
"task_set", "lessons", "award_set", "badges"
)
# don't repeat the records
instructors = instructors.distinct()
result = []
for person in instructors:
tasks = person.task_set.filter(role__in=roles).select_related(
"event", "role"
)
record = {
"person": person,
"lessons": person.lessons.all(),
"instructor_awards": person.award_set.filter(
badge__in=person.badges.instructor_badges()
),
"tasks": zip(tasks, self.foreign_tasks(tasks, person, roles)),
}
result.append(record)
return result
def make_message(self, record):
tmplt = get_template("mailing/instructor_activity.txt")
return tmplt.render(context=record)
def subject(self, record):
# in future we can vary the subject depending on the record details
return "Updating your Software Carpentry information"
def recipient(self, record):
return record["person"].email
def send_message(
self, subject, message, sender, recipient, for_real=False, django_mailing=False
):
if for_real:
if django_mailing:
send_mail(subject, message, sender, [recipient])
else:
command = 'mail -s "{subject}" -r {sender} {recipient}'.format(
subject=subject,
sender=sender,
recipient=recipient,
)
writer = os.popen(command, "w")
writer.write(message)
writer.close()
if self.verbosity >= 2:
# write only a header
self.stdout.write("-" * 40 + "\n")
self.stdout.write("To: {}\n".format(recipient))
self.stdout.write("Subject: {}\n".format(subject))
self.stdout.write("From: {}\n".format(sender))
if self.verbosity >= 3:
# write whole message out
self.stdout.write(message + "\n")
def handle(self, *args, **options):
# default is dummy run - only actually send mail if told to
send_for_real = options["send_out_for_real"]
# by default include only instructors who have `may_contact==True`
no_may_contact_only = options["no_may_contact_only"]
# use mailing options from settings.py or the `mail` system command?
django_mailing = options["django_mailing"]
# verbosity option is added by Django
self.verbosity = int(options["verbosity"])
sender = options["sender"]
results = self.fetch_activity(not no_may_contact_only)
for result in results:
message = self.make_message(result)
subject = self.subject(result)
recipient = self.recipient(result)
self.send_message(
subject,
message,
sender,
recipient,
for_real=send_for_real,
django_mailing=django_mailing,
)
if self.verbosity >= 1:
self.stdout.write("Sent {} emails.\n".format(len(results)))
| mit | -337,196,337,048,741,500 | 32.757962 | 87 | 0.561698 | false | 4.380165 | false | false | false |
gdetor/SI-RF-Structure | Statistics/bivariate.py | 1 | 5235 |
# Copyright (c) 2014, Georgios Is. Detorakis (gdetor@gmail.com) and
# Nicolas P. Rougier (nicolas.rougier@inria.fr)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script illustrates the bivariate plot presented in [1].
import math
import numpy as np
import matplotlib
matplotlib.use('macosx')
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
import matplotlib.patheffects as PathEffects
matplotlib.rc('xtick', direction = 'out')
matplotlib.rc('ytick', direction = 'out')
matplotlib.rc('xtick.major', size = 8, width=1)
matplotlib.rc('xtick.minor', size = 4, width=1)
matplotlib.rc('ytick.major', size = 8, width=1)
matplotlib.rc('ytick.minor', size = 4, width=1)
matplotlib.rc('text', usetex=True )
matplotlib.rc('font', serif='Times')
#indices = [(3, 18) , (26, 18) , (10, 7) , (25, 11) , (3, 21) , (8, 11) , (21, 14) , (20, 16) , (8, 19) , (16, 5) , (0, 9) , (17, 15) , (7, 20) , (20, 0) , (27, 19) , (4, 24) ]
indices = [(10, 21) , (29, 16) , (28, 14) , (20, 17) , (13, 19) , (3, 15) , (23, 18) , (0, 18) , (8, 31) , (16, 11) , (0, 20) , (24, 13) , (11, 2) , (1, 1) , (19, 20) , (2, 21)]
if __name__=='__main__':
Z = np.load('areas-ref.npy')
X, Y = Z[:,0], Z[:,1]
fig = plt.figure(figsize=(8,8), facecolor="white")
ax = plt.subplot(1,1,1,aspect=1)
plt.scatter(X+0.01,Y+0.01,s=3, edgecolor='k', facecolor='k')
# Show some points
I = [a*32+b for (a,b) in indices]
# I = [3,143,149,189,1,209,192,167,64,87,10,40,68,185,61,198]
plt.scatter(X[I],Y[I],s=5,color='k')
for i in range(len(I)):
x,y = X[i],Y[i]
letter = ord('A')+i
plt.scatter(X[I[i]], Y[I[i]], s=40, facecolor='None', edgecolor='k')
# label = plt.annotate(" %c" % (chr(letter)), (x+.25,y+.25), weight='bold', fontsize=16,
# path_effects=[PathEffects.withStroke(linewidth=2, foreground="w", alpha=.75)])
plt.annotate(" %c" % (chr(ord('A')+i)), (X[I[i]]+.25,Y[I[i]]+.25), weight='bold')
# Select some points by cliking them
# letter = ord('A')
# def onclick(event):
# global letter
# #print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
# # event.button, event.x, event.y, event.xdata, event.ydata)
# C = (X-event.xdata)**2 + (Y-event.ydata)**2
# I = np.argmin(C)
# plt.ion()
# x,y = X[I],Y[I]
# # print x, y, I, np.unravel_index(I,(32,32))
# print np.unravel_index(I,(32,32)), ",",
# plt.scatter(x, y, s=40, facecolor='None', edgecolor='k')
# label = plt.annotate(" %c" % (chr(letter)), (x+.25,y+.25), weight='bold', fontsize=16,
# path_effects=[PathEffects.withStroke(linewidth=2, foreground="w", alpha=.75)])
# #label.set_bbox(dict(facecolor='white', edgecolor='None', alpha=0.65 ))
# plt.ioff()
# letter = letter+1
# cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.xlabel(r'Excitatory area (mm2)')
plt.ylabel(r'Inhibitory area (mm2)')
plt.xscale('log')
plt.yscale('log')
plt.xticks([5,10,30], ['5','10','30'])
plt.yticks([5,10,30], ['5','10','30'])
plt.xlim(5,30)
plt.ylim(5,30)
plt.text(5.5,26, "n = 1024")
plt.plot([1,100],[1,100], ls='--', color='k')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.savefig('bivariate.pdf', dpi=72)
plt.show()
| gpl-3.0 | 5,246,652,282,058,843,000 | 42.625 | 177 | 0.634193 | false | 3.029514 | false | false | false |
asmaps/nsupdate.info | nsupdate/main/models.py | 1 | 4197 |
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.conf import settings
from django.db.models.signals import post_delete
from django.contrib.auth.hashers import make_password
from main import dnstools
import dns.resolver
from datetime import datetime
import re
class BlacklistedDomain(models.Model):
domain = models.CharField(
max_length=256,
unique=True,
help_text='Blacklisted domain. Evaluated as regex (search).')
last_update = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(User, blank=True, null=True)
def __unicode__(self):
return u"%s" % (self.domain, )
def domain_blacklist_validator(value):
for bd in BlacklistedDomain.objects.all():
if re.search(bd.domain, value):
raise ValidationError(u'This domain is not allowed')
class Domain(models.Model):
domain = models.CharField(max_length=256, unique=True)
nameserver_ip = models.IPAddressField(max_length=256,
help_text="An IP where the nsupdates for this domain will be sent to")
nameserver_update_key = models.CharField(max_length=256)
last_update = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(User, blank=True, null=True)
def __unicode__(self):
return u"%s" % (self.domain, )
class Host(models.Model):
"""TODO: hash update_secret on save (if not already hashed)"""
subdomain = models.CharField(max_length=256, validators=[
RegexValidator(
regex=r'^(([a-z0-9][a-z0-9\-]*[a-z0-9])|[a-z0-9])$',
message='Invalid subdomain: only "a-z", "0-9" and "-" is allowed'
),
domain_blacklist_validator])
domain = models.ForeignKey(Domain)
update_secret = models.CharField(max_length=256) # gets hashed on save
comment = models.CharField(
max_length=256, default='', blank=True, null=True)
last_update = models.DateTimeField(auto_now=True)
last_api_update = models.DateTimeField(blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='hosts')
def __unicode__(self):
return u"%s.%s - %s" % (
self.subdomain, self.domain.domain, self.comment)
class Meta:
unique_together = (('subdomain', 'domain'),)
def get_fqdn(self):
return '%s.%s' % (self.subdomain, self.domain.domain)
@classmethod
def filter_by_fqdn(cls, fqdn, **kwargs):
# Assuming subdomain has no dots (.) the fqdn is split at the first dot
splitted = fqdn.split('.', 1)
if not len(splitted) == 2:
raise NotImplemented("FQDN has to contain a dot")
return Host.objects.filter(
subdomain=splitted[0], domain__domain=splitted[1], **kwargs)
def getIPv4(self):
try:
return dnstools.query_ns(self.get_fqdn(), 'A')
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.NoNameservers):
return ''
def getIPv6(self):
try:
return dnstools.query_ns(self.get_fqdn(), 'AAAA')
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.NoNameservers):
return ''
def poke(self):
self.last_api_update = datetime.now()
self.save()
def generate_secret(self):
# note: we use a quick hasher for the update_secret as expensive
# more modern hashes might put too much load on the servers. also
# many update clients might use http without ssl, so it is not too
# secure anyway.
secret = User.objects.make_random_password()
self.update_secret = make_password(
secret,
hasher='sha1'
)
self.save()
return secret
def post_delete_host(sender, **kwargs):
obj = kwargs['instance']
dnstools.delete(obj.get_fqdn())
post_delete.connect(post_delete_host, sender=Host)
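# A hedged usage sketch (not part of the original module): looking a host up by
# its fully qualified domain name splits at the first dot, so 'myhost.example.org'
# is matched as subdomain='myhost' within domain 'example.org':
#
#   hosts = Host.filter_by_fqdn('myhost.example.org')
#   ipv4 = hosts[0].getIPv4() if hosts else ''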
| bsd-3-clause | 9,057,991,372,963,181,000 | 33.68595 | 90 | 0.653562 | false | 3.774281 | false | false | false |
efornal/pulmo | app/migrations/0004_objectives_and_targets_connection.py | 1 | 2224 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0003_create_model_productionform'),
]
operations = [
migrations.CreateModel(
name='ConnectionSource',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
('ip', models.CharField(max_length=200, null=True)),
('observations', models.TextField(null=True, blank=True)),
],
options={
'db_table': 'connection_source',
'verbose_name_plural': 'ConnectionSources',
},
),
migrations.CreateModel(
name='ConnectionTarget',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
('ip', models.CharField(max_length=200, null=True)),
('ip_firewall', models.CharField(max_length=200, null=True)),
('observations', models.TextField(null=True, blank=True)),
],
options={
'db_table': 'connection_target',
'verbose_name_plural': 'ConnectionTargets',
},
),
migrations.AddField(
model_name='applicationform',
name='connection_sources',
field=models.ManyToManyField(to='app.ConnectionSource', blank=True),
),
migrations.AddField(
model_name='applicationform',
name='connection_targets',
field=models.ManyToManyField(to='app.ConnectionTarget', blank=True),
),
migrations.AddField(
model_name='productionform',
name='connection_sources',
field=models.ManyToManyField(to='app.ConnectionSource', blank=True),
),
migrations.AddField(
model_name='productionform',
name='connection_targets',
field=models.ManyToManyField(to='app.ConnectionTarget', blank=True),
),
]
| gpl-3.0 | -2,067,851,419,938,858,500 | 35.459016 | 80 | 0.547212 | false | 4.742004 | false | false | false |
FluidityProject/fluidity | tools/optimality.py | 2 | 32601 |
#!/usr/bin/python3
# Copyright (C) 2006 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a full list
# of copyright holders.
#
# Prof. C Pain
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# amcgsoftware@imperial.ac.uk
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import os.path
import numpy
import argparse
import shlex
from subprocess import Popen, PIPE
import scipy.optimize
import string
import libspud
from fluidity_tools import stat_parser
from fluidity_tools import stat_creator
import time
import pickle
import glob
import math
import shutil
# Hack for libspud to be able to read options from different files.
# A better solution would be to fix libspud or use an alternative implementation like
# https://github.com/gmarkall/manycore_form_compiler/blob/master/mcfc/optionfile.py
def superspud(filename, cmd):
libspud.load_options(filename)
r = None
if hasattr(cmd, '__iter__'):
for c in cmd:
exec("try: r = " + c + "\nexcept libspud.SpudNewKeyWarning: pass")
else:
exec("try: r = " + cmd + "\nexcept libspud.SpudNewKeyWarning: pass")
libspud.clear_options()
return r
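# For illustration (mirroring how superspud is used below): it loads the given
# options file, evaluates one libspud call (or a list of them) against it,
# clears the loaded options again and returns the result, e.g.
#
#   sim_name = superspud(model_options, "libspud.get_option('/simulation_name')")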
# Executes the model specified in the optimality option tree
# The model stdout is printed to stdout.
def run_model(m, opt_options, model_options):
update_custom_controls(m, opt_options)
if (superspud(model_options, "libspud.have_option('/adjoint/controls/load_controls')")):
# If the model is loading the default controls, we need to make sure the control files are up to date:
update_default_controls(m, opt_options, model_options)
command_line = superspud(opt_options, "libspud.get_option('/model/command_line')")
option_file = superspud(opt_options, "libspud.get_option('/model/option_file')")
args = shlex.split(command_line)
args.append(option_file)
p = Popen(args, stdout=PIPE,stderr=PIPE)
out = string.join(p.stdout.readlines() )
outerr = string.join(p.stderr.readlines() )
if p.wait() != 0:
print("Model execution failed.")
print("The error was:")
print(outerr)
exit()
if verbose:
print("Model output: ")
print(out)
# Initialises the custom controls using the supplied python code.
def get_custom_controls(opt_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
# With the custom type, the user specifies a python function to initialise the controls.
if ctype == 'custom':
initial_control_code = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type::custom/initial_control')")
d = {}
exec(initial_control_code, d)
m[cname] = d['initial_control']()
return m
# Initialise the default controls by reading in the control files.
# This assumes that the model has been run without the "/adjoint/load_controls" option (which produced the initial control files).
def read_default_controls(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
act_flag = False # Check that at least one control file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_'+cname+ '_[0-9]*.pkl'):
try:
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_'+ cname+ '_'):len(ctrl_file)-4])
except:
print("Error while reading the control files.")
print("The control file ", ctrl_file, " does not conform the standard naming conventions for control files.")
exit()
f = open(ctrl_file, 'rb')
m[(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print("Warning: Found no control file for control ", cname, ".")
return m
# Initialise the default control bounds by reading in the control bound files.
# This assumes that the model has been run without the "/adjoint/load_controls" option (which produced the initial control bound files).
def read_default_control_bounds(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m_bounds = {"lower_bound": {}, "upper_bound": {}}
# Loop over controls
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype != 'default':
continue
have_bound = {}
# Loop over lower and upper bound
for k in m_bounds.keys():
have_bound[k] = superspud(model_options, "libspud.have_option('/adjoint/controls/control["+str(i)+"/bounds/"+k+"')")
if not have_bound[k]:
continue
act_flag = False # Check that at least one control bound file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_'+cname+ '_'+k+'_[0-9]*.pkl'):
try:
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_'+ cname+ '_'+k+'_'):len(ctrl_file)-4])
except:
print("Error while reading the control bound files.")
print("The control bound file ", ctrl_file, " does not conform the standard naming conventions for control files.")
exit()
f = open(ctrl_file, 'rb')
m_bounds[k][(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print("Warning: Found no control bound file for control ", cname, ".")
return m_bounds
# Completes the control bounds by adding the missing controls and filling them with None
def complete_default_control_bounds(m, m_bounds):
bound_types = {"lower_bound": {}, "upper_bound": {}}
for bound_type in bound_types:
for control in m.keys():
if m_bounds[bound_type].has_key(control):
continue
# We need objects as dtype because we want to keep the Nones for later
m_bounds[bound_type][control] = numpy.empty(shape = m[control].shape, dtype=object)
m_bounds[bound_type][control].fill(None)
return m_bounds
# Returns the control derivatives for both the custom and the default controls.
def read_control_derivatives(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
functional_name = superspud(opt_options, "libspud.get_option('/functional/name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
derivs = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
act_flag = False # Check that at least one control file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_adjoint_'+functional_name+'_'+ cname+ '_TotalDerivative_[0-9]*.pkl'):
try:
# The naming convention is control+simulation_name+control_name+TotalDerivative, but do not forget that
# the derivatives where produced during the adjoint run in which the simulation name is simulation_name+functional_name
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_adjoint_'+functional_name+'_'+ cname+ '_TotalDerivative_'):len(ctrl_file)-4])
except:
print("Error while reading the control derivative files.")
print("The control file ", ctrl_file, " does not conform the standard naming conventions for control files.")
exit()
f = open(ctrl_file, 'rb')
derivs[(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print("Warning: Found no control derivative file for control ", cname, ".")
elif ctype == 'custom':
control_derivative_code = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type::custom/control_derivative')")
d = {}
exec(control_derivative_code, d)
derivs[cname] = d['control_derivative']()
else:
print("Unknown control type " + ctype + ".")
exit()
return derivs
# Writes the custom controls onto disk
def update_custom_controls(m, opt_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
# With the custom type, the user specifies a python function to update the controls.
if ctype == 'custom':
update_control_code = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type::custom/update_control')")
d = {}
exec(update_control_code, d)
d['update_control'](m[cname])
# Writes the default controls onto disk
def update_default_controls(m, opt_options, model_options):
global debug
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
# Loop over default controls
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
# Loop over controls
for k in m.keys():
# Check if that is a control we are looking for
if k[0] == cname:
timestep = k[1]
file_name = 'control_'+simulation_name + '_' + cname + '_' + str(timestep) + '.pkl'
if not os.path.isfile(file_name):
print("Error: writing control file ", file_name, " which did not exist before.")
exit()
if debug:
# Check that the file we are writing has the same shape than the one we are writing
f = open(file_name, 'rb')
m_old = pickle.load(f)
if m[k].shape != m_old.shape:
print("Error: The shape of the control in ", file_name, " changed.")
exit()
f.close()
f = open(file_name, 'wb')
pickle.dump(m[k], f)
f.close()
# Check the consistency of model and option file
def check_option_consistency(opt_options, model_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
# Check that the default controls exist in the model
# and that custom controls not.
if ctype == 'custom':
if superspud(model_options, "libspud.have_option('/adjoint/controls/control::" + cname + "')"):
print("The custom control " + cname + " is a default control in the model option tree.")
exit()
elif ctype== 'default':
if not superspud(model_options, "libspud.have_option('/adjoint/controls/control::" + cname + "')"):
print("The default control " + cname + " was not found in the model option tree.")
exit()
else:
print("Unknown control type " + ctype + ".")
exit()
# Check that the the controls in dJdm are consistent with the ones in m
# If m_bounds is present, it also checks the consistency of the bounds
def check_control_consistency(m, djdm, m_bounds=None):
djdm_keys = djdm.keys()
m_keys = m.keys()
djdm_keys.sort()
m_keys.sort()
if m_keys != djdm_keys:
print("Error: The controls are not consistent with the controls derivatives.")
print("The controls are:", m_keys)
print("The control derivatives are:", djdm_keys)
print("Check the consistency of the control definition in the model and the optimality configuration.")
exit()
for k, v in sorted(m.items()):
if m[k].shape != djdm[k].shape:
print("The control ", k, " has shape ", m[k].shape, " but dJd(", k, ") has shape ", djdm[k].shape)
exit()
# Check the bounds
if m_bounds!=None:
bound_types = ("lower_bound", "upper_bound")
for bound_type in bound_types:
m_bounds_keys = m_bounds[bound_type].keys()
m_bounds_keys.sort()
if m_keys != m_bounds_keys:
print("Error: The controls are not consistent with the control ", bound_type, ".")
print("The controls are:", m_keys)
print("The control ", bound_type, "s are:", m_bounds_keys)
exit()
for k, v in sorted(m.items()):
if m[k].shape != m_bounds[bound_type][k].shape:
print("The control ", k, " has shape ", m[k].shape, " but the ", bound_type, " has shape ", m_bounds[bound_type][k].shape)
exit()
def delete_temporary_files(model_options):
# remove any control files
pkl_files = glob.glob('control_*.pkl')
for f in pkl_files:
os.remove(f)
# remove any stat files from the model
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
stat_files = glob.glob(simulation_name+'*.stat')
for f in stat_files:
os.remove(f)
# Returns true if bounds are specified for one of the controls
def have_bounds(opt_options, model_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
have_bounds = False
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
have_bounds = have_bounds or superspud(model_options, "libspud.have_option('/adjoint/controls/control["+cname+"]/bounds')")
return have_bounds
#######################################################
################# Optimisation loop ###################
#######################################################
def optimisation_loop(opt_options, model_options):
# Implement a memoization function to avoid duplicated functional (derivative) evaluations
class MemoizeMutable:
def __init__(self, fn):
self.fn = fn
self.memo = {}
def __call__(self, *args, **kwds):
# Use the pickled positional and keyword arguments as the cache key
key = pickle.dumps(args, 1) + pickle.dumps(kwds, 1)
if key not in self.memo:
self.memo[key] = self.fn(*args, **kwds)
return self.memo[key]
def has_cache(self, *args, **kwds):
key = pickle.dumps(args, 1) + pickle.dumps(kwds, 1)
return key in self.memo
# Insert a function value into the cache manually.
def __add__(self, value, *args, **kwds):
key = pickle.dumps(args, 1) + pickle.dumps(kwds, 1)
self.memo[key] = value
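# A minimal usage sketch of MemoizeMutable (illustration only; `expensive_fn`
# and `cached_fn` are hypothetical names, not part of this script):
#
#   def expensive_fn(x):
#       return x ** 2
#   cached_fn = MemoizeMutable(expensive_fn)
#   cached_fn(3.0)            # first call: evaluates expensive_fn and caches the result
#   cached_fn(3.0)            # second call: returned straight from the cache
#   cached_fn.has_cache(3.0)  # -> True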
# Small test code for the un/serialiser
def test_serialise():
x = {'a': numpy.random.rand(3,2), 'b': numpy.random.rand(3,2,4,5), 'c': numpy.random.rand(1)}
[m_serial, m_shape] = serialise(x)
x_re = unserialise(m_serial, m_shape)
return (x['a'] == x_re['a']).all() and (x['b'] == x_re['b']).all() and (x['c'] == x_re['c']).all()
# This function takes in a dictionary m with numpy.array as entries.
# From that it creates one serialised numpy.array with all the data.
# In addition it creates m_shape, a dictionary which is used in unserialise.
def serialise(m):
m_serial = numpy.array([])
m_shape = {}
for k, v in sorted(m.items()):
m_serial = numpy.append(m_serial, v.flatten())
m_shape[k] = v.shape
return [m_serial, m_shape]
# Reconstructs the original dictionary of numpy.array's from the serialised version and the shape.
def unserialise(m_serial, m_shape):
m = {}
start_index = 0
for k, s in sorted(m_shape.items()):
offset = 1
for d in s:
offset = offset * d
end_index = start_index + offset
m[k] = numpy.reshape(m_serial[start_index:end_index], s)
start_index = end_index
return m
# Returns the functional value with the current controls
def J(m_serial, m_shape, write_stat=True):
has_cache = mem_pure_J.has_cache(m_serial, m_shape)
if has_cache:
cache_str = "(cache hit)"
else:
cache_str = ""
J = mem_pure_J(m_serial, m_shape)
print("J = %s %s" % (J, cache_str))
if write_stat:
# Update the functional value in the optimisation stat file
stat_writer[(functional_name, 'value')] = J
return J
# A pure version of the computation of J
def pure_J(m_serial, m_shape):
if verbose:
print("Running forward model for functional evaluation (<function pure_J>)")
m = unserialise(m_serial, m_shape)
run_model(m, opt_options, model_options)
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
stat_file = simulation_name+".stat"
s = stat_parser(stat_file)
if functional_name not in s:
print("The functional '", functional_name, "' does not exist in the stat file.")
print("Check your model configuration")
exit()
J = s[functional_name]["value"][-1]
return J
# Returns the functional derivative with respect to the controls.
def dJdm(m_serial, m_shape, write_stat=True):
return mem_pure_dJdm(m_serial, m_shape)
# A pure version of the computation of dJdm
def pure_dJdm(m_serial, m_shape):
if verbose:
print("Running forward/adjoint model for functional derivative evaluation (<function pure_dJdm>)")
m = unserialise(m_serial, m_shape)
run_model(m, opt_options, model_options)
# While computing dJdm we run the forward/adjoint model and in particular we compute the
# functional values. In order to not compute the functional values again when calling
# J, we manually add write it into the memoize cache.
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
stat_file = simulation_name+".stat"
J = stat_parser(stat_file)[functional_name]["value"][-1]
# Add the functional value to mem_pure_J's cache
mem_pure_J.__add__(J, m_serial, m_shape)
# Now get the functional derivative information
djdm = read_control_derivatives(opt_options, model_options)
check_control_consistency(m, djdm, m_bounds)
# Serialise djdm in the same order as m_serial
djdm_serial = []
for k, v in sorted(m_shape.items()):
djdm_serial = numpy.append(djdm_serial, djdm[k])
return djdm_serial
# Check the gradient using the Taylor expansion
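# (Sketch of the reasoning behind the test below.) By Taylor expansion,
# J(m + h*v) = J(m) + h * dot(dJdm(m), v) + O(h^2). The zeroth-order residual
# |J(m + h*v) - J(m)| should therefore converge at first order in h (it roughly
# halves when h is halved), while the first-order residual
# |J(m + h*v) - J(m) - h * dot(dJdm(m), v)| should converge at second order
# (it roughly quarters), so the printed convergence orders should approach
# 1.0 and 2.0 respectively.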
def check_gradient(m_serial, m_shape):
print('-' * 80)
print(' Entering gradient verification ')
print('-' * 80)
fd_errors = []
fd_conv = []
grad_errors = []
grad_conv = []
nb_tests = 4
perturbation = 2e-4
perturbation_vec = numpy.random.rand(len(m_serial))
j_unpert = J(m_serial, m_shape)
djdm_unpert = dJdm(m_serial, m_shape)
for i in range(nb_tests):
perturbation = perturbation/2
m_pert = m_serial + perturbation*perturbation_vec
fd_errors.append(abs(j_unpert - J(m_pert, m_shape)))
grad_errors.append(abs(j_unpert + numpy.dot(djdm_unpert, perturbation_vec*perturbation) - J(m_pert, m_shape)))
print("Error in Taylor expansion of order 0: ", fd_errors)
print("Error in Taylor expansion of order 1: ", grad_errors)
for i in range(nb_tests-1):
if fd_errors[i+1] == 0.0 or fd_errors[i] == 0.0:
fd_conv.append(1.0)
else:
fd_conv.append(math.log(fd_errors[i]/fd_errors[i+1], 2))
if grad_errors[i+1] == 0.0 or grad_errors[i] == 0.0:
grad_conv.append(2.0)
else:
grad_conv.append(math.log(grad_errors[i]/grad_errors[i+1], 2))
print("Convergence of Taylor expansion of order 0 (should be 1.0): ", fd_conv)
print("Convergence of Taylor expansion of order 1 (should be 2.0): ", grad_conv)
stat_writer[(functional_name, "iteration")] = 0
stat_writer[(functional_name + "_gradient_error", "convergence")] = min(grad_conv)
stat_writer.write()
# This function gets called after each optimisation iteration.
# It is currently used to write statistics and copy model output files into a subdirectory
def callback(m_serial, m_shape):
global iteration
iteration = iteration + 1
stat_writer[(functional_name, "iteration")] = iteration
stat_writer.write()
if superspud(opt_options, "libspud.have_option('/debug/save_model_output')"):
save_model_results()
print('-' * 80)
print(' Finished optimisation iteration', iteration)
print('-' * 80)
def save_model_results():
global iteration
# Copy any model output files in a subdirectory
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
Popen(["mkdir", "opt_"+str(iteration)+"_"+simulation_name.strip()])
Popen("cp "+simulation_name.strip()+"* "+"opt_"+str(iteration)+"_"+simulation_name.strip(), shell=True)
############################################################################
############################################################################
print('-' * 80)
print(' Beginning of optimisation loop')
print('-' * 80)
############################################################################
### Initialisation of optimisation loop ###
global iteration
iteration = 0
# Initialise stat file
if verbose:
print("Initialise stat file")
stat_writer=stat_creator(superspud(opt_options, "libspud.get_option('/name')").strip() + '.stat')
# Get the optimisation settings
if verbose:
print("Read oml settings")
algo = superspud(opt_options, "libspud.get_option('optimisation_options/optimisation_algorithm[0]/name')")
have_bound = have_bounds(opt_options, model_options)
# Create the memoized version of the functional (derivative) evaluation functions
mem_pure_dJdm = MemoizeMutable(pure_dJdm)
mem_pure_J = MemoizeMutable(pure_J)
### Get initial controls ###
### The initial controls are retrieved in several steps.
### 1) get custom controls by running the user specified python code and save the associated pkl files
### 2) run the forward/adjoint model without the "load_control" flag. The model will save the initial default controls as pkl files.
### 3) Finally load these initial default controls files
# First we initialise the custom controls
# This has to be done first since the next step
# involves running the model and therefore
# will need the custom controls to be set.
if verbose:
print("Get initial custom controls")
custom_m = get_custom_controls(opt_options)
# Next run the forward/adjoint model without the option
# /adjoint/controls/load_controls
if verbose:
print("Get initial default controls")
model_file = superspud(opt_options, "libspud.get_option('/model/option_file')")
if (superspud(model_options, "libspud.have_option('/adjoint/controls/load_controls')")):
superspud(model_options, ["libspud.delete_option('/adjoint/controls/load_controls')", "libspud.write_options('"+ model_file +"')"])
# Run the forward model including adjoint.
functional_name = superspud(opt_options, "libspud.get_option('/functional/name')")
if superspud(opt_options, "libspud.have_option('/adjoint/functional::"+functional_name+"/disable_adjoint_run')"):
superspud(opt_options, "libspud.delete_option('/adjoint/functional::"+functional_name+"/disable_adjoint_run')")
[custom_m_serial, custom_m_shape] = serialise(custom_m)
mem_pure_J(custom_m_serial, custom_m_shape)
if superspud(opt_options, "libspud.have_option('/debug/save_model_output')"):
save_model_results()
# This should have created all the default initial controls and we can now activate the load_controls flag.
superspud(model_options, ["libspud.add_option('/adjoint/controls/load_controls')", "libspud.write_options('"+ model_file +"')"])
# Finally, load the default controls
m = read_default_controls(opt_options, model_options)
m_bounds = read_default_control_bounds(opt_options, model_options)
nb_controls = len(m) + len(custom_m)
# And merge them
m.update(custom_m)
if (nb_controls != len(m)):
print("Error: Two controls with the same name defined.")
print("The controls must have all unique names.")
print("Your controls are: ", m.keys())
exit()
djdm = read_control_derivatives(opt_options, model_options)
# Now complete the bounds arrays where the user did not specify any bounds
m_bounds = complete_default_control_bounds(m, m_bounds)
# Since now all the controls and derivatives are defined, we can check the consistency of the control variables
check_control_consistency(m, djdm, m_bounds)
### Serialise the controls for the optimisation routine
[m_serial, m_shape] = serialise(m)
[m_lb_serial, m_lb_shape] = serialise(m_bounds["lower_bound"])
[m_ub_serial, m_ub_shape] = serialise(m_bounds["upper_bound"])
assert(m_ub_shape == m_shape)
assert(m_lb_shape == m_shape)
# zip the lower and upper bound to a list of tuples
m_bounds_serial = list(zip(m_lb_serial, m_ub_serial))
# Check gradient
if superspud(opt_options, "libspud.have_option('/debug/check_gradient')"):
check_gradient(m_serial, m_shape)
############################################################################
if algo != 'NULL':
print('-' * 80)
print(' Entering %s optimisation algorithm ' % algo)
print('-' * 80)
############################################################################
################################
########### BFGS ###############
################################
if algo == 'BFGS':
if have_bound:
print("BFGS does not support bounds.")
exit()
tol = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::BFGS/tolerance')")
maxiter=None
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::BFGS/iterations')"):
maxiter = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::BFGS/iterations')")
res = scipy.optimize.fmin_bfgs(J, m_serial, dJdm, gtol=tol, full_output=1, maxiter=maxiter, args=(m_shape, ), callback = lambda m: callback(m, m_shape))
print("Functional value J(m): ", res[1])
print("Control state m: ", res[0])
################################
########### NCG ################
################################
elif algo == 'NCG':
if have_bound:
print("NCG does not support bounds.")
exit()
tol = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::NCG/tolerance')")
maxiter=None
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::NCG/iterations')"):
maxiter = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::NCG/iterations')")
res = scipy.optimize.fmin_ncg(J, m_serial, dJdm, avextol=tol, full_output=1, maxiter=maxiter, args=(m_shape, ), callback = lambda m: callback(m, m_shape))
print("Functional value J(m): ", res[1])
print("Control state m: ", res[0])
################################
########### L-BFGS-B ###########
################################
elif algo == 'L-BFGS-B':
opt_args = dict(func=J, x0=m_serial, fprime=dJdm, args=(m_shape,))
if have_bound:
opt_args['bounds'] = m_bounds_serial
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/tolerance')"):
pgtol = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/tolerance')")
opt_args['pgtol'] = pgtol
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/factr')"):
factr = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/factr')")
opt_args['factr'] = factr
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/memory_limit')"):
memory_limit = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/memory_limit')")
opt_args['m'] = memory_limit
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/maximal_functional_evaluations')"):
maxfun = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/maximal_functional_evaluations')")
opt_args['maxfun'] = maxfun
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/verbosity')"):
iprint = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/verbosity')")
opt_args['iprint'] = iprint
res = scipy.optimize.fmin_l_bfgs_b(**opt_args)
print(res)
################################
########### NULL ##############
################################
elif algo == 'NULL':
exit()
else:
print("Unknown optimisation algorithm in option path.")
exit()
################# main() ###################
def main():
global verbose
global debug
parser = argparse.ArgumentParser(description='Optimisation program for fluidity.')
parser.add_argument('filename', metavar='FILE', help="the .oml file")
parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')
parser.add_argument('-d', '--debug', action='store_true', help='the debug mode runs additional internal tests.')
args = parser.parse_args()
verbose = args.verbose
debug = args.debug
if not os.path.isfile(args.filename):
print("File", args.filename, "not found.")
exit()
# Initial spud environments for the optimality and model options.
opt_file = args.filename
if not superspud(opt_file, "libspud.have_option('/optimisation_options')"):
print("File", args.filename, "is not a valid .oml file.")
exit()
model_file = superspud(opt_file, "libspud.get_option('/model/option_file')")
if not os.path.isfile(model_file):
print("Could not find ", model_file ," as specified in /model/option_file")
exit()
# Create a copy of the option files so that we don't touch the original
def rename_file(fn):
fn_basename, fn_extension = os.path.splitext(fn)
shutil.copy(fn, fn_basename+'_tmp'+fn_extension)
fn = fn_basename+'_tmp'+fn_extension
return fn
model_file = rename_file(model_file)
opt_file = rename_file(opt_file)
superspud(opt_file, ["libspud.set_option('/model/option_file', '" + model_file + "')", "libspud.write_options('" + opt_file + "')"])
# Check consistency of the option files
check_option_consistency(opt_file, model_file)
# Start the optimisation loop
optimisation_loop(opt_file, model_file)
# Tidy up
os.remove(opt_file)
os.remove(model_file)
################# __main__ ########################
if '__main__'==__name__:
start_time = time.time()
main()
print("Optimisation finished in ", time.time() - start_time, "seconds")
|
lgpl-2.1
| -6,594,667,205,454,949,000
| 44.279167
| 158
| 0.648968
| false
| 3.478182
| false
| false
| false
|
rcatwood/Savu
|
savu/plugins/filters/component_analysis/ica.py
|
1
|
2407
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: ica
:platform: Unix
:synopsis: A plugin to fit peaks
.. moduleauthor:: Aaron Parsons <scientificsoftware@diamond.ac.uk>
"""
import logging
from savu.plugins.utils import register_plugin
from savu.plugins.filters.base_component_analysis import BaseComponentAnalysis
from sklearn.decomposition import FastICA
import numpy as np
@register_plugin
class Ica(BaseComponentAnalysis):
"""
This plugin performs independent component analysis on XRD/XRF spectra.
:param w_init: The initial mixing matrix. Default: None.
:param random_state: The state. Default: 1.
"""
def __init__(self):
super(Ica, self).__init__("Ica")
def filter_frames(self, data):
logging.debug("I am starting the old componenty vous")
data = data[0]
sh = data.shape
newshape = (np.prod(sh[:-1]), sh[-1])
print "The shape of the data is:"+str(data.shape) + str(newshape)
data = np.reshape(data, (newshape))
# data will already be shaped correctly
logging.debug("Making the matrix")
ica = FastICA(n_components=self.parameters['number_of_components'],
algorithm='parallel',
whiten=self.parameters['whiten'],
w_init=self.parameters['w_init'],
random_state=self.parameters['random_state'])
logging.debug("Performing the fit")
data = self.remove_nan_inf(data) #otherwise the fit flags up an error for obvious reasons
S_ = ica.fit_transform(data)
print "S_Shape is:"+str(S_.shape)
print "self.images_shape:"+str(self.images_shape)
scores = np.reshape(S_, (self.images_shape))
eigenspectra = ica.components_
logging.debug("mange-tout")
return [scores, eigenspectra]
|
gpl-3.0
| 5,032,943,843,606,573,000
| 37.206349
| 98
| 0.666805
| false
| 3.826709
| false
| false
| false
|
thethythy/Mnemopwd
|
mnemopwd/client/uilayer/uicomponents/TitledBorderWindow.py
|
1
|
2935
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import curses
from .BaseWindow import BaseWindow
class TitledBorderWindow(BaseWindow):
"""
A window with a border and a title. Subclass of BaseWindow.
"""
def __init__(self, parent, h, w, y, x, title, modal=False, colourT=False, colourD=False):
"""Create base window"""
BaseWindow.__init__(self, parent, h, w, y, x, modal=modal)
self.title = title
self.colourT = colourT
self.colourD = colourD
self._create()
def redraw(self):
"""See mother class"""
self._create()
BaseWindow.redraw(self)
def close(self):
"""See mother class"""
if self.modal:
self.shadows.erase() # Erase shadows
self.shadows.refresh()
BaseWindow.close(self)
def _create(self):
self.window.attrset(self.colourD)
self.window.border()
self.window.addstr(1, 2, self.title, self.colourT)
self.window.hline(2, 1, curses.ACS_HLINE, self.w - 2)
# Add a shadow if it is a modal window
if self.modal:
self.shadows = curses.newwin(self.h, self.w + 1, self.y + 1,
self.x + 1)
self.shadows.attrset(self.colourD)
self.shadows.addstr(self.h - 1, 0, chr(0x2580)*self.w) # Horizontal
for i in range(0, self.h - 1):
self.shadows.addstr(i, self.w - 1, chr(0x2588)) # Vertical
self.shadows.refresh()
self.window.refresh()
self.window.attrset(0)
|
bsd-2-clause
| -3,652,827,740,056,266,000
| 39.205479
| 93
| 0.667121
| false
| 3.887417
| false
| false
| false
|
no-net/gr-winelo
|
python/client/sim_source_c.py
|
1
|
12656
|
import numpy
#from grc_gnuradio import blks2 as grc_blks2
from gnuradio import gr, uhd, blocks # , analog
from gruel import pmt
# import grextras for python blocks
import gnuradio.extras
from twisted.internet import reactor
#import thread
from threading import Thread
import time
import random
from winelo.client import SendFactory, uhd_gate
#from winelo.client.tcp_blocks import tcp_source
class sim_source_cc(gr.block):
def __init__(self, hier_blk, serverip, serverport, clientname,
packetsize, samp_rate, center_freq, net_id=0):
gr.block.__init__(
self,
name="WiNeLo source",
in_sig=[numpy.complex64],
out_sig=[numpy.complex64],
)
print '[INFO] WiNeLo - Instantiating %s' % clientname
self.hier_blk = hier_blk
# this will store all samples that came from twisted
self.samples = numpy.zeros(0)
# this is used to connect the block to the twisted reactor
self.twisted_conn = None
self.net_id = net_id
# Needed for WiNeLo-time
self.virtual_counter = 0
# Evaluated for timed commands -> can be higher/absolute (GPS time)
self.virtual_time = 0
self.virt_offset = 0
self.absolute_time = True
self.samp_rate = samp_rate
# Port used by tcp source/sink for sample transmission
self.dataport = None
self.packet_size = packetsize
self.samples_to_produce = self.packet_size
self.drop_one_in_n_cmds = 0 # TODO: was 50 for per_measurement!
# TODO: DEBUG
#self.no_zero_counter = 0
self.dbg_counter = 0
# connect to the server
reactor.connectTCP(serverip,
serverport,
SendFactory(self, {'type': 'rx',
'name': clientname,
'centerfreq': center_freq,
'samprate': self.samp_rate,
'net_id': self.net_id,
'packet_size': packetsize})
)
if not reactor.running:
print '[INFO] WiNeLo - Starting the reactor'
#thread.start_new_thread(reactor.run, (),
#{'installSignalHandlers': 0})
Thread(target=reactor.run, args=(False,)).start()
else:
time.sleep(2)
print '[INFO] WiNeLo - giving twisted time to setup and block ' \
'everything'
time.sleep(3)
def work(self, input_items, output_items):
#print "Source work called"
self.twisted_conn.condition.acquire()
if self.virtual_counter == 0:
self.generate_rx_tags()
while True:
# this is necessary because twisted and gnuradio are running in
# different threads. So it is possible that new samples arrive
# while gnuradio is still working on the old samples
if len(input_items[0]) == 0:
#print "DEBUG: sim_source - waiting for items"
self.twisted_conn.condition.wait()
#print "DEBUG: sim_source - got items"
#if len(input_items[0]) is 0:
# return 0
if self.samples_to_produce <= len(input_items[0]) and \
self.samples_to_produce > 0:
produce_n_samples = self.samples_to_produce
else:
produce_n_samples = len(input_items[0])
if produce_n_samples > len(output_items[0]):
produce_n_samples = len(output_items[0])
#print "DEBUG: src - produce_n: %s - samples_to_produce: %s" \
#% (produce_n_samples, self.samples_to_produce)
#elif self.samples_to_produce < len(input_items[0]):
# print "DEBUG: samples to produce:", self.samples_to_produce,"\
#" - len input:", len(input_items[0]), " - len output:", \
#len(output_items[0])
# if self.samples_to_produce > 0:
# output_items[0][:] = \
#input_items[0][0:self.samples_to_produce]
#else:
output_items[0][0:produce_n_samples] = \
input_items[0][0:produce_n_samples]
### DEBUG:
#no_zeros_last = self.no_zero_counter
#for item in output_items[0][0:produce_n_samples]:
# if item != 0:
# self.no_zero_counter += 1
#if self.no_zero_counter != no_zeros_last:
# print "self.no_zero_counter", self.no_zero_counter
#elif len(input_items[0]) < len(output_items[0]):
# n_processed = len(input_items[0])
# output_items[0] = input_items[0]
#print "Source processed:", n_processed
#print "DEBUG: sim_source - elif - items processed:",
#n_processed
#time.sleep(1.0 / self.samp_rate * n_processed)
#else:
# n_processed = len(output_items[0])
# output_items[0] = input_items[0][0:n_processed]
#print "Source processed:", n_processed
#print "DEBUG: sim_source - else - items processed:", \
#n_processed
#time.sleep(1.0 / self.samp_rate * n_processed)
self.timeout_start = None
self.virtual_counter += produce_n_samples
self.virtual_time += produce_n_samples / float(self.samp_rate)
# TODO TODO: Produce max. diff samples, then call commands before
# running again!
# CHECK TIMED COMMANDS
if len(self.hier_blk.commands) > 0 and \
len(self.hier_blk.command_times) > 0:
#print "DEBUG: evaluating cmd times"
cmd_time, n_cmds = self.hier_blk.command_times[0]
#print "DEBUG: time %s - n_cmds %s - virt_time %s" \
#% (time, n_cmds, self.virtual_time)
while self.virtual_time > (cmd_time + 0.0065):
#print "DEBUG: calling run_timed_cmds"
if self.drop_one_in_n_cmds > 0:
rand_no = random.randint(1, self.drop_one_in_n_cmds)
else:
rand_no = 0
if rand_no == 1:
self.hier_blk.command_times.pop(0)
self.hier_blk.commands.pop(0)
print "[INFO] WiNeLo - Dropped cmd due to HW model!"
else:
#print "DEBUG: RxRxRx - Tuning cmd sent at: %s - " \
#"CMD time: %s" % (self.virtual_time, cmd_time)
#print "DEBUG: Set RX-freq to %s at %s" \
#% (self.hier_blk.commands[0][1], cmd_time)
#print "DEBUG: virtual counter:", self.virtual_counter
self.hier_blk.command_times.pop(0)
#print "DEBUG---------------------hier_blk_cmd_times",\
#self.hier_blk.command_times
self.run_timed_cmds(n_cmds)
if len(self.hier_blk.command_times) > 0:
#print "DEBUG: NEW TIME, CMDS"
cmd_time, n_cmds = self.hier_blk.command_times[0]
else:
break
#if produce_n_samples < self.p_size:
# print "DEBUG: source - ACK less samples"
self.samples_to_produce -= produce_n_samples
#print "DEBUG: NO ACK sent"
#print "DEBUG: NO ACK - produced:", len(output_items[0])
#print "DEBUG: NO ACK - samples to produce:", \
#self.samples_to_produce
#print "DEBUG: NO ACK - len input", len(input_items[0])
if self.samples_to_produce == 0:
self.dbg_counter += 1
#print "DEBUG: ACK senti no:", self.dbg_counter
#print "DEBUG: ACK - produced:", produce_n_samples
self.twisted_conn.samplesReceived()
self.samples_to_produce = self.packet_size
self.twisted_conn.condition.release()
#print "DEBUG: sim_src - produced:", n_processed
return produce_n_samples
def run_timed_cmds(self, n_cmds):
for i in range(n_cmds):
cmd, args = self.hier_blk.commands.pop()
#print "DEBUG: src - running cmd %s with args %s" % (cmd, args)
cmd(*args)
def new_samples_received(self, samples):
self.samples = numpy.append(self.samples, samples)
def set_connection(self, connection):
self.twisted_conn = connection
def set_dataport(self, port):
self.dataport = port
print '[INFO] WiNeLo - Port %s will be used for data transmission' \
% self.dataport
def set_packetsize(self, packet_size):
self.packet_size = packet_size
if self.samples_to_produce > self.packet_size:
self.samples_to_produce = self.packet_size
def update_virttime(self, time_offset):
if self.absolute_time:
print "[INFO] WiNeLo - Setting source time to server time:", \
time_offset
self.virtual_time += time_offset
self.virt_offset = time_offset
def get_dataport(self):
while self.dataport is None:
reactor.callWhenRunning(time.sleep, 0.5)
return self.dataport
def get_time_now(self):
# Calculate time according tot the sample rate & the number of
# processed items
#time = 1.0 / self.samp_rate * self.virtual_counter
time = self.virtual_time
full_secs = int(time)
frac_secs = time - int(time)
# Return full & fractional seconds (like UHD)
return full_secs, frac_secs
def generate_rx_tags(self):
#Produce tags
offset = self.nitems_written(0) + 0
key_time = pmt.pmt_string_to_symbol("rx_time")
#value_time = pmt.from_python(1.0 /
#self.samp_rate * self.virtual_counter)
value_time = pmt.from_python(self.get_time_now())
key_rate = pmt.pmt_string_to_symbol("rx_rate")
value_rate = pmt.from_python(self.samp_rate)
self.add_item_tag(0, offset, key_time, value_time)
self.add_item_tag(0, offset, key_rate, value_rate)
class sim_source_c(gr.hier_block2, uhd_gate):
"""
Wireless Netowrks In-the-Loop source
Note: This is not a subclass of uhd.usrp_source because some methods
shouldn't be available at all for this block.
"""
def __init__(self, serverip, serverport, clientname, packetsize,
simulation, samp_rate, center_freq, net_id,
device_addr, stream_args):
gr.hier_block2.__init__(self, "sim_source_c",
gr.io_signature(0, 0, 0),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
uhd_gate.__init__(self)
self.simulation = simulation
self.serverip = serverip
self.samp_rate = samp_rate
self.typ = 'rx'
if not self.simulation:
self.usrp = uhd.usrp_source(device_addr, stream_args)
# TODO: Parameters
self.connect(self.usrp, self)
else:
self.simsrc = sim_source_cc(self, serverip, serverport, clientname,
packetsize, samp_rate, center_freq,
net_id)
# TODO: dirty hack!!!
# self.tcp_source = grc_blks2.tcp_source(itemsize=gr.sizeof_gr_complex,
# addr=self.serverip,
# port=self.simsrc.get_dataport(),
# server=False)
self.tcp_source = gr.udp_source(itemsize=gr.sizeof_gr_complex,
host=self.serverip,
port=self.simsrc.get_dataport(),
payload_size=1472,
eof=False,
wait=True)
self.gain_blk = blocks.multiply_const_vcc((1, ))
self.connect(self.tcp_source, self.gain_blk, self.simsrc, self)
|
gpl-3.0
| -4,139,212,272,071,479,300
| 43.720848
| 84
| 0.514855
| false
| 3.997473
| false
| false
| false
|
stettberger/metagit
|
gmp/tools.py
|
1
|
1689
|
from gmp.options import *
import subprocess
import tempfile
import os
class ScreenExecutor:
instance = None
def __init__(self):
self.screen_fd, self.screen_path = tempfile.mkstemp()
self.counter = 0
self.screen_fd = os.fdopen(self.screen_fd, "w")
def get():
if not ScreenExecutor.instance:
ScreenExecutor.instance = ScreenExecutor()
return ScreenExecutor.instance
get = staticmethod(get)
def push(cmd):
i = ScreenExecutor.get()
i.screen_fd.write("""screen %d sh -c "echo; echo '%s' ; %s; echo Press ENTER;read a"\n"""
% (i.counter, cmd.replace("'", "\\\""), cmd))
i.counter += 1
push = staticmethod(push)
def execute():
if Options.opt('screen'):
i = ScreenExecutor.get()
i.screen_fd.write('caption always "%{wR}%c | %?%-Lw%?%{wB}%n*%f %t%?(%u)%?%{wR}%?%+Lw%?"\n')
i.screen_fd.close()
a = subprocess.Popen("screen -c %s" % i.screen_path, shell=True)
a.wait()
os.unlink(i.screen_path)
execute = staticmethod(execute)
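# A rough usage sketch of ScreenExecutor (illustration only; the commands shown
# are hypothetical): each pushed command becomes one window in a GNU screen
# session, and execute() launches that session when the 'screen' option is
# enabled, otherwise nothing happens.
#
#   ScreenExecutor.push("git -C repo1 pull")
#   ScreenExecutor.push("git -C repo2 pull")
#   ScreenExecutor.execute()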
def esc(str):
str = str.replace("\\", "\\\\")
quote = False
for c in " ;&|{}()$":
if c in str:
quote = True
if quote:
return "'" + str.replace("'", "\\'") + "'"
return str
echo_exec = True
def execute(cmd, echo=True):
if Options.opt('screen'):
ScreenExecutor.push(cmd)
return
if echo:
print(cmd)
a = subprocess.Popen(cmd, shell=(type(cmd) == str))
# Just wait here if we are not in parallel mode
if not Options.opt('parallel'):
a.wait()
return a
|
gpl-3.0
| -679,937,508,764,406,700
| 26.688525
| 104
| 0.541149
| false
| 3.563291
| false
| false
| false
|
Wizmann/DjangoSimditor
|
bootstrap3/renderers.py
|
1
|
12414
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import (TextInput, DateInput, FileInput, CheckboxInput,
ClearableFileInput, Select, RadioSelect, CheckboxSelectMultiple)
from django.forms.extras import SelectDateWidget
from django.forms.forms import BaseForm, BoundField
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, strip_tags
from django.template import Context
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from .bootstrap import get_bootstrap_setting
from bootstrap3.text import text_value
from .exceptions import BootstrapError
from .html import add_css_class
from .forms import (render_field, render_label, render_form_group,
is_widget_with_placeholder, is_widget_required_attribute, FORM_GROUP_CLASS)
class FormRenderer(object):
"""
Default form renderer
"""
def __init__(self, form, layout='', form_group_class=FORM_GROUP_CLASS,
field_class='', label_class='', show_help=True, exclude='',
set_required=True):
if not isinstance(form, BaseForm):
raise BootstrapError(
'Parameter "form" should contain a valid Django Form.')
self.form = form
self.layout = layout
self.form_group_class = form_group_class
self.field_class = field_class
self.label_class = label_class
self.show_help = show_help
self.exclude = exclude
self.set_required = set_required
def render_fields(self):
rendered_fields = []
for field in self.form:
rendered_fields.append(render_field(
field,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
show_help=self.show_help,
exclude=self.exclude,
set_required=self.set_required,
))
return '\n'.join(rendered_fields)
def get_form_errors(self):
form_errors = []
for field in self.form:
if field.is_hidden and field.errors:
form_errors += field.errors
return form_errors + self.form.non_field_errors()
def render_errors(self):
form_errors = self.get_form_errors()
if form_errors:
errors = '\n'.join(['<p>{e}</p>'.format(e=e) for e in form_errors])
return '''
<div class="alert alert-danger alert-dismissable alert-link">
<button class="close" data-dismiss="alert" aria-hidden="true">
×</button>{errors}</div>\n'''.format(errors=errors)
return ''
def render(self):
return self.render_errors() + self.render_fields()
class FieldRenderer(object):
"""
Default field renderer
"""
def __init__(self, field, layout='', form_group_class=FORM_GROUP_CLASS,
field_class=None, label_class=None, show_label=True,
show_help=True, exclude='', set_required=True,
addon_before=None, addon_after=None):
# Only allow BoundField
if not isinstance(field, BoundField):
raise BootstrapError('Parameter "field" should contain a valid Django BoundField.')
self.field = field
self.layout = layout
self.form_group_class = form_group_class
self.field_class = field_class
self.label_class = label_class
self.show_label = show_label
self.exclude = exclude
self.set_required = set_required
self.widget = field.field.widget
self.initial_attrs = self.widget.attrs.copy()
self.field_help = force_text(mark_safe(field.help_text)) if show_help and field.help_text else ''
self.field_errors = [conditional_escape(force_text(error)) for error in field.errors]
self.placeholder = field.label
self.form_error_class = getattr(field.form, 'error_css_class', '')
self.form_required_class = getattr(field.form, 'required_css_class', '')
self.addon_before = addon_before
self.addon_after = addon_after
def restore_widget_attrs(self):
self.widget.attrs = self.initial_attrs
def add_class_attrs(self):
self.widget.attrs['class'] = self.widget.attrs.get('class', '')
if not isinstance(self.widget, (CheckboxInput,
RadioSelect,
CheckboxSelectMultiple,
FileInput)):
self.widget.attrs['class'] = add_css_class(
self.widget.attrs['class'], 'form-control')
def add_placeholder_attrs(self):
placeholder = self.widget.attrs.get('placeholder', self.placeholder)
if placeholder and is_widget_with_placeholder(self.widget):
self.widget.attrs['placeholder'] = placeholder
def add_help_attrs(self):
title = self.widget.attrs.get('title', strip_tags(self.field_help))
if not isinstance(self.widget, CheckboxInput):
self.widget.attrs['title'] = title
def add_required_attrs(self):
if self.set_required and is_widget_required_attribute(self.widget):
self.widget.attrs['required'] = 'required'
def add_widget_attrs(self):
self.add_class_attrs()
self.add_placeholder_attrs()
self.add_help_attrs()
self.add_required_attrs()
def list_to_class(self, html, klass):
mapping = [
('<ul', '<div'),
('</ul>', '</div>'),
('<li', '<div class="{klass}"'.format(klass=klass)),
('</li>', '</div>'),
]
for k, v in mapping:
html = html.replace(k, v)
return html
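# For example (sketch, not actual Django widget output), list_to_class(html, 'radio')
# turns markup of the shape
#   <ul><li><label>...</label></li></ul>
# into
#   <div><div class="radio"><label>...</label></div></div>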
def put_inside_label(self, html):
content = '{field} {label}'.format(field=html, label=self.field.label)
return render_label(content=content, label_title=strip_tags(self.field_help))
def fix_date_select_input(self, html):
div1 = '<div class="col-xs-4">'
div2 = '</div>'
html = html.replace('<select', div1 + '<select')
html = html.replace('</select>', '</select>' + div2)
return '<div class="row bootstrap3-multi-input">' + html + '</div>'
def fix_clearable_file_input(self, html):
"""
Fix a clearable file input
TODO: This needs improvement
Currently Django returns
Currently: <a href="dummy.txt">dummy.txt</a> <input id="file4-clear_id" name="file4-clear" type="checkbox" /> <label for="file4-clear_id">Clear</label><br />Change: <input id="id_file4" name="file4" type="file" /><span class=help-block></span></div>
"""
# TODO This needs improvement
return '<div class="row bootstrap3-multi-input"><div class="col-xs-12">' + html + '</div></div>'
def post_widget_render(self, html):
if isinstance(self.widget, RadioSelect):
html = self.list_to_class(html, 'radio')
elif isinstance(self.widget, CheckboxSelectMultiple):
html = self.list_to_class(html, 'checkbox')
elif isinstance(self.widget, SelectDateWidget):
html = self.fix_date_select_input(html)
elif isinstance(self.widget, ClearableFileInput):
html = self.fix_clearable_file_input(html)
elif isinstance(self.widget, CheckboxInput):
html = self.put_inside_label(html)
return html
def wrap_widget(self, html):
if isinstance(self.widget, CheckboxInput):
html = '<div class="checkbox">{content}</div>'.format(content=html)
return html
def make_input_group(self, html):
if ((self.addon_before or self.addon_after) and
isinstance(self.widget, (TextInput, DateInput, Select))
):
before = '<span class="input-group-addon">{addon}</span>'.format(
addon=self.addon_before) if self.addon_before else ''
after = '<span class="input-group-addon">{addon}</span>'.format(
addon=self.addon_after) if self.addon_after else ''
html = '<div class="input-group">{before}{html}{after}</div>'.format(
before=before, after=after, html=html)
return html
def append_to_field(self, html):
help_text_and_errors = [self.field_help] + self.field_errors \
if self.field_help else self.field_errors
if help_text_and_errors:
help_html = get_template(
'bootstrap3/field_help_text_and_errors.html').render(Context({
'field': self.field,
'help_text_and_errors': help_text_and_errors,
'layout': self.layout,
}))
html += '<span class="help-block">{help}</span>'.format(help=help_html)
return html
def get_field_class(self):
field_class = self.field_class
if not field_class and self.layout == 'horizontal':
field_class = get_bootstrap_setting('horizontal_field_class')
return field_class
def wrap_field(self, html):
field_class = self.get_field_class()
if field_class:
html = '<div class="{klass}">{html}</div>'.format(klass=field_class, html=html)
return html
def get_label_class(self):
label_class = self.label_class
if not label_class and self.layout == 'horizontal':
label_class = get_bootstrap_setting('horizontal_label_class')
label_class = text_value(label_class)
if not self.show_label:
label_class = add_css_class(label_class, 'sr-only')
return add_css_class(label_class, 'control-label')
def get_label(self):
if isinstance(self.widget, CheckboxInput):
label = None
else:
label = self.field.label
if self.layout == 'horizontal' and not label:
return ' '
return label
def add_label(self, html):
label = self.get_label()
if label:
html = render_label(label, label_class=self.get_label_class()) + html
return html
def get_form_group_class(self):
form_group_class = self.form_group_class
if self.field.errors and self.form_error_class:
form_group_class = add_css_class(
form_group_class, self.form_error_class)
if self.field.field.required and self.form_required_class:
form_group_class = add_css_class(
form_group_class, self.form_required_class)
if self.field_errors:
form_group_class = add_css_class(form_group_class, 'has-error')
elif self.field.form.is_bound:
form_group_class = add_css_class(form_group_class, 'has-success')
return form_group_class
def wrap_label_and_field(self, html):
return render_form_group(html, self.get_form_group_class())
def render(self):
# See if we're not excluded
if self.field.name in self.exclude.replace(' ', '').split(','):
return ''
# Hidden input requires no special treatment
if self.field.is_hidden:
return force_text(self.field)
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
self.restore_widget_attrs()
html = self.post_widget_render(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
html = self.wrap_field(html)
html = self.add_label(html)
html = self.wrap_label_and_field(html)
return html
class InlineFieldRenderer(FieldRenderer):
"""
Inline field renderer
"""
def add_error_attrs(self):
field_title = self.widget.attrs.get('title', '')
field_title += ' ' + ' '.join([strip_tags(e) for e in self.field_errors])
self.widget.attrs['title'] = field_title.strip()
def add_widget_attrs(self):
super(InlineFieldRenderer, self).add_widget_attrs()
self.add_error_attrs()
def append_to_field(self, html):
return html
def get_field_class(self):
return self.field_class
def get_label_class(self):
return add_css_class(self.label_class, 'sr-only')
|
mit
| -1,456,957,295,543,779,600
| 38.788462
| 257
| 0.599807
| false
| 3.902546
| false
| false
| false
|
dimatura/opendr
|
slider_demo.py
|
1
|
3446
|
from cvwrap import cv2
import numpy as np
import chumpy as ch
from copy import deepcopy
def nothing(x):
pass
def get_renderer():
import chumpy as ch
from opendr.everything import *
# Load mesh
m = load_mesh('/Users/matt/geist/OpenDR/test_dr/nasa_earth.obj')
m.v += ch.array([0,0,3.])
w, h = (320, 240)
trans = ch.array([[0,0,0]])
# Construct renderer
rn = TexturedRenderer()
rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=trans+m.v, f=m.f, texture_image=m.texture_image[:,:,::-1], ft=m.ft, vt=m.vt, bgcolor=ch.zeros(3))
rn.vc = SphericalHarmonics(vn=VertNormals(v=rn.v, f=rn.f), components=ch.array([4.,0.,0.,0.]), light_color=ch.ones(3))
return rn
def main():
# Create a black image, a window
img = np.zeros((300,512,3), np.uint8)
cv2.namedWindow('image')
cv2.namedWindow('derivatives')
rn = get_renderer()
tracked = {
'sph0': rn.vc.components[0],
'sph1': rn.vc.components[1],
'sph2': rn.vc.components[2],
'sph3': rn.vc.components[3],
'k0': rn.camera.k[0],
'k1': rn.camera.k[1],
'k2': rn.camera.k[2]
}
cnst = 1000
for k in sorted(tracked.keys()):
v = tracked[k]
cv2.createTrackbar(k, 'image', 0,cnst, nothing)
old_tracked = tracked
cv2.setTrackbarPos('sph0', 'image', 800)
while(1):
cv2.imshow('image',rn.r)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
for k, v in tracked.items():
v[:] = np.array(cv2.getTrackbarPos(k, 'image')).astype(np.float32)*4/cnst
if tracked[k].r[0] != old_tracked[k].r[0]:
drim = rn.dr_wrt(v).reshape(rn.shape)
mn = np.mean(drim)
drim /= np.max(np.abs(drim.ravel()))*2.
drim += .5
# drim = drim - np.min(drim)
# drim = drim / np.max(drim)
cv2.imshow('derivatives', drim)
cv2.waitKey(1)
old_tracked = deepcopy(tracked)
# while True:
# for k_change in sorted(tracked.keys()):
# if k_change == 'sph0':
# continue
# for t in np.arange(0, np.pi, .05):
# cv2.setTrackbarPos(k_change, 'image', int(np.sin(t)*1000))
# cv2.imshow('image',rn.r)
# k = cv2.waitKey(1) & 0xFF
# if k == 27:
# break
#
# for k, v in tracked.items():
# v[:] = np.array(cv2.getTrackbarPos(k, 'image')).astype(np.float32)*4/cnst
# if tracked[k].r[0] != old_tracked[k].r[0]:
# drim = rn.dr_wrt(v).reshape(rn.shape)
# mn = np.mean(drim)
# drim /= np.max(np.abs(drim.ravel()))*2.
# drim += .5
# # drim = drim - np.min(drim)
# # drim = drim / np.max(drim)
# cv2.imshow('derivatives', drim)
#
#
# print rn.vc.components
#
# cv2.waitKey(1)
# old_tracked = deepcopy(tracked)
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
mit
| 3,740,670,982,097,944,000
| 31.509434
| 126
| 0.482008
| false
| 2.973253
| false
| false
| false
|
ceteri/pytextrank
|
setup.py
|
1
|
1642
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pytextrank",
version="2.0.3",
author="Paco Xander Nathan",
author_email="paco@derwen.ai",
description="Python implementation of TextRank for phrase extraction and lightweight summarization of text documents",
long_description=long_description,
long_description_content_type="text/markdown",
url="http://github.com/DerwenAI/pytextrank",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Human Machine Interfaces",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Text Processing :: General",
"Topic :: Text Processing :: Indexing",
"Topic :: Text Processing :: Linguistic",
],
python_requires=">=3.5",
install_requires=[
"coverage",
"graphviz",
"networkx",
"spacy",
],
keywords="textrank, spacy, phrase extraction, parsing, natural language processing, nlp, knowledge graph, graph algorithms, text analytics, extractive summarization",
license="MIT",
zip_safe=False,
)
|
apache-2.0
| -4,886,367,476,329,915,000
| 38.095238
| 170
| 0.64799
| false
| 4.287206
| false
| false
| false
|
bjornedstrom/python-aesgcm
|
test/test.py
|
1
|
2446
|
from binascii import hexlify, unhexlify
import unittest
import aesgcm
class TestVectors(unittest.TestCase):
VECTORS = [
{
'key': unhexlify(b'0000000000000000000000000000000000000000000000000000000000000000'),
'iv': unhexlify(b'000000000000000000000000'),
'aad': None,
'ptx': unhexlify(b'00000000000000000000000000000000'),
'ctx': unhexlify(b'cea7403d4d606b6e074ec5d3baf39d18'),
'tag': unhexlify(b'd0d1c8a799996bf0265b98b5d48ab919')
},
{
'key': unhexlify(b'0000000000000000000000000000000000000000000000000000000000000000'),
'iv': unhexlify(b'000000000000000000000000'),
'aad': unhexlify(b'00000000000000000000000000000000'),
'ptx': None,
'ctx': None,
'tag': unhexlify(b'2d45552d8575922b3ca3cc538442fa26')
},
{
'key': unhexlify(b'0000000000000000000000000000000000000000000000000000000000000000'),
'iv': unhexlify(b'000000000000000000000000'),
'aad': unhexlify(b'00000000000000000000000000000000'),
'ptx': unhexlify(b'00000000000000000000000000000000'),
'ctx': unhexlify(b'cea7403d4d606b6e074ec5d3baf39d18'),
'tag': unhexlify(b'ae9b1771dba9cf62b39be017940330b4')
}
]
def _verify_vec(self, vec):
enc = aesgcm.EncryptObject(vec['key'], vec['iv'])
dec = aesgcm.DecryptObject(vec['key'], vec['iv'], vec['tag'])
if vec['aad']:
enc.update_aad(vec['aad'])
dec.update_aad(vec['aad'])
if vec['ptx'] and vec['ctx']:
self.assertEqual(vec['ctx'], enc.encrypt(vec['ptx']))
self.assertEqual(vec['ptx'], dec.decrypt(vec['ctx']))
self.assertEqual(vec['tag'], enc.finalize())
self.assertTrue(dec.finalize())
def test_vec_1(self):
self._verify_vec(self.VECTORS[0])
def test_vec_2(self):
self._verify_vec(self.VECTORS[1])
def test_vec_3(self):
self._verify_vec(self.VECTORS[2])
def test_invalid_tag(self):
vec = self.VECTORS[0]
invalid_tag = unhexlify(b'00000000000000000000000000000000')
dec = aesgcm.DecryptObject(vec['key'], vec['iv'], invalid_tag)
dec.decrypt(vec['ctx'])
self.assertRaises(aesgcm.AuthenticationError, dec.finalize)
if __name__ == '__main__':
unittest.main()
|
bsd-2-clause
| -3,236,481,412,823,010,000
| 33.942857
| 98
| 0.60834
| false
| 3.694864
| true
| false
| false
|
Shiruyaka/MeowMeow
|
Keyring.py
|
1
|
2692
|
# -*- coding: utf-8 -*-
from collections import namedtuple
import time
import random
from Crypto.PublicKey import RSA
PrivateRing = namedtuple('PrivateRing', 'timestamp key_id pub_key priv_key')
PublicRing = namedtuple('PublicRing', 'timestamp key_id pub_key owner_trust user_name key_legit')
def import_keyring(typeOfKeyRing):
ring = list()
try:
with open(typeOfKeyRing + '_keyring.txt', 'r') as r:
data = r.read()
data = data.rstrip().split('@')
for line in data:
if not line:
continue
line = line.rstrip().split('|')
if typeOfKeyRing == 'priv':
ring.append(PrivateRing(*line))
elif typeOfKeyRing == 'pub':
ring.append(PublicRing(*line))
except IOError:
new_file = open(typeOfKeyRing + '_keyring.txt', 'w')
new_file.close()
return ring
def export_keyring(ring, typeOfKeyRing):
with open(typeOfKeyRing + '_keyring.txt', 'w') as w:
for key in ring:
record = ''
for attr in key:
record += attr + '|'
record = record.rstrip('|')
record += '@'
w.write(record)
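# On disk (sketch of the format implied by import/export above): the fields of
# one key are joined with '|' and each record is terminated by '@', so a single
# (hypothetical) public-ring record looks like
#   <timestamp>|<key_id>|<pub_key>|<owner_trust>|<user_name>|<key_legit>@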
def add_to_keyring(ring, typeOfKeyRing, attributes):
if typeOfKeyRing == 'priv':
ring.append(PrivateRing(*attributes))
else:
ring.append(PublicRing(*attributes))
return ring
######randomly choose key from private keyring to encrypt
def find_pubkey_in_ring(ring, id = None, whose = None):
if id:
result = [x.pub_key for x in ring if x.key_id == id]
if len(result) == 0:
return None
else:
return RSA.importKey(result[0])
elif whose:
result = [x.pub_key for x in ring if x.user_name == whose]
if len(result) == 0:
return None
else:
print len(result)
ind = random.randint(0, len(result) - 1)
print ind
return RSA.importKey(result[ind])
def find_privkey_in_ring(ring, id):
result = [x.priv_key for x in ring if x.key_id == id]
if len(result) != 0:
return RSA.importKey(result[0])
else:
return []
def choose_randomly_enc_key(ring):
ind = random.randint(0,len(ring) - 1)
return RSA.importKey(ring[ind].priv_key), ring[ind].key_id
def parse_keys_from_db(data):
ring = list()
for i in data:
tmstmp = time.mktime(i[0].timetuple())
id = i[1]
pub_key = str(i[2])
usr_name = i[3]
trust = i[4]
ring.append(PublicRing(tmstmp, id, pub_key, 0, usr_name, trust))
return ring
|
mit
| 4,771,661,473,854,343,000
| 25.92
| 98
| 0.554978
| false
| 3.628032
| false
| false
| false
|
htcondor/job_hooks
|
hooks/hook_reply_fetch.py
|
1
|
3170
|
#!/usr/bin/python
# Copyright 2008 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import pickle
import sys
import os
import logging
from condorutils import SUCCESS, FAILURE
from condorutils.workfetch import *
from condorutils.socketutil import *
from condorutils.osutil import *
from condorutils.readconfig import *
from condorutils.log import *
def main(argv=None):
if argv is None:
argv = sys.argv
log_name = os.path.basename(argv[0])
try:
config = read_condor_config('JOB_HOOKS', ['IP', 'PORT', 'LOG'], permit_param_only = False)
except ConfigError, error:
try:
print >> sys.stderr, 'Warning: %s' % error.msg
print >> sys.stderr, 'Attemping to read config from "/etc/condor/job-hooks.conf"'
config = read_config_file('/etc/condor/job-hooks.conf', 'Hooks')
except ConfigError, error:
print >> sys.stderr, 'Error: %s. Exiting' % error.msg
return(FAILURE)
try:
size = int(read_condor_config('', ['MAX_JOB_HOOKS_LOG'])['max_job_hooks_log'])
except:
size = 1000000
base_logger = create_file_logger(log_name, '%s.reply' % config['log'], logging.INFO, size=size)
log(logging.INFO, log_name, 'Hook called')
# Create a reply_fetch notification
request = condor_wf()
reply_type = sys.argv[1]
if reply_type == 'accept':
request.type = condor_wf_types.reply_claim_accept
elif reply_type == 'reject':
request.type = condor_wf_types.reply_claim_reject
else:
log(logging.ERROR, log_name, 'Received unknown reply fetch type: %s' % reply_type)
return(FAILURE)
# Store the ClassAd from STDIN in the data portion of the message
request.data = ''
for line in sys.stdin:
request.data = request.data + str(line)
slots = grep('^WF_REQ_SLOT\s*=\s*"(.+)"$', request.data)
if slots != None:
log(logging.INFO, log_name, 'Slot %s is making the request' % slots[0].strip())
# Send the message
log(logging.INFO, log_name, 'Contacting daemon')
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client_socket.connect((config['ip'], int(config['port'])))
client_socket.send(pickle.dumps(request, 2))
except Exception, error:
try:
close_socket(client_socket)
except:
pass
log(logging.ERROR, log_name, 'socket error %d: %s' % (error[0], error[1]))
try:
close_socket(client_socket)
except SocketError, error:
log(logging.WARNING, log_name, error.msg)
log(logging.INFO, log_name, 'Hook exiting')
return(SUCCESS)
if __name__ == '__main__':
sys.exit(main())
|
apache-2.0
| -4,077,373,557,673,507,000
| 31.680412
| 98
| 0.662145
| false
| 3.47207
| true
| false
| false
|
pombredanne/discern
|
examples/problem_grader/grader/migrations/0001_initial.py
|
1
|
7167
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Rubric'
db.create_table(u'grader_rubric', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('associated_problem', self.gf('django.db.models.fields.IntegerField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'grader', ['Rubric'])
# Adding model 'RubricOption'
db.create_table(u'grader_rubricoption', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('rubric', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['grader.Rubric'])),
('option_points', self.gf('django.db.models.fields.IntegerField')()),
('option_text', self.gf('django.db.models.fields.TextField')()),
('selected', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'grader', ['RubricOption'])
# Adding model 'UserProfile'
db.create_table(u'grader_userprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
('api_key', self.gf('django.db.models.fields.TextField')(default='')),
('api_user', self.gf('django.db.models.fields.TextField')(default='')),
('api_user_created', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'grader', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'Rubric'
db.delete_table(u'grader_rubric')
# Deleting model 'RubricOption'
db.delete_table(u'grader_rubricoption')
# Deleting model 'UserProfile'
db.delete_table(u'grader_userprofile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'grader.rubric': {
'Meta': {'object_name': 'Rubric'},
'associated_problem': ('django.db.models.fields.IntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'grader.rubricoption': {
'Meta': {'object_name': 'RubricOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option_points': ('django.db.models.fields.IntegerField', [], {}),
'option_text': ('django.db.models.fields.TextField', [], {}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['grader.Rubric']"}),
'selected': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'grader.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'api_key': ('django.db.models.fields.TextField', [], {'default': "''"}),
'api_user': ('django.db.models.fields.TextField', [], {'default': "''"}),
'api_user_created': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['grader']
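# --- Illustrative usage sketch (editor's addition, not generated by South) ---
# Assumes the grader app defines Rubric, RubricOption and UserProfile models matching
# the tables created above; the import path and example values are assumptions
# inferred from this migration, not verified against grader/models.py.
def _example_rubric_setup(user):
    """Create a rubric with one option for the given auth.User (illustration only)."""
    from grader.models import Rubric, RubricOption, UserProfile
    profile, _ = UserProfile.objects.get_or_create(user=user)
    rubric = Rubric.objects.create(associated_problem=1, user=user)
    option = RubricOption.objects.create(
        rubric=rubric,
        option_points=2,
        option_text="Response addresses the prompt.",
    )
    return profile, rubric, option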
|
agpl-3.0
| 4,707,439,369,962,626,000
| 60.793103
| 187
| 0.566206
| false
| 3.681048
| false
| false
| false
|
deepgraphs/dgraphdb
|
restDgraphDb/dgraphdb/dgraphdbstore.py
|
1
|
1380
|
__author__ = 'mpetyx'
from rdflib import Graph
# import rdflib.plugin
from django.conf import settings
import datetime
import os
# register('SQLite', Store, 'rdflib.store.SQLite', 'SQLite')
def random_file_generating():
basename = "deepGraphFile"
suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
middle = os.urandom(16).encode('hex')
filename = "_".join([basename, middle, suffix])
return filename
class DeepGraphStore():
store_name = settings.DEEPGRAPHS_DEFAULT_STORAGE
def __init__(self, create=True):
self.create = create
self.path = "databases/" + random_file_generating()
def setUp(self):
self.graph = Graph(store=self.store_name)
self.graph.open(self.path, create=self.create)
if self.create:
self.graph.parse("http://njh.me/foaf.rdf", format='xml')
self.graph.commit()
    def open(self, path):
        # rdflib's Graph.open() returns a status code, so keep the Graph instance
        # and return its triple count rather than the bound __len__ method.
        self.graph = Graph(store=self.store_name)
        self.graph.open(path, create=False)
        return len(self.graph)
def query(self, sparql_query):
return self.graph.query(sparql_query)
def parse(self, path_to_file_):
self.graph.parse(path_to_file_)
def load(self, triples):
self.graph.load(triples)
def close(self):
self.graph.close()
    def size(self):
        # len() triggers the store query; capture it before closing the connection.
        size = len(self.graph)
        self.close()
        return size
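# --- Illustrative usage sketch (editor's addition) ---
# A minimal sketch of exercising DeepGraphStore, assuming Django settings provide
# DEEPGRAPHS_DEFAULT_STORAGE, the relative "databases/" directory exists, and network
# access is available for the seed FOAF document loaded in setUp(). The SPARQL query
# is only an example.
def _example_store_roundtrip():
    store = DeepGraphStore(create=True)
    store.setUp()  # opens the store and loads the seed FOAF graph
    for row in store.query("SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 5"):
        print(row)
    store.close()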
|
mit
| 5,964,938,748,346,534,000
| 25.037736
| 68
| 0.624638
| false
| 3.458647
| false
| false
| false
|
aysteph3/MiniMIPS_Testing
|
src/algorithm_opt_greedy_ver2.py
|
1
|
9513
|
# Copyright (C) 2017 Siavoosh Payandeh Azad, Stephen Oyeniran
# for each new function we start from empty set!
import Logger
import sys
import copy
import itertools
import time
import package
package.generate_folders(package.generated_files_folder)
sys.stdout = Logger.Logger(package.generated_files_folder)
if "-sp" in sys.argv[1:]:
saf_output_patterns_file_name= package.generated_files_folder + "/" +"SAF"+ sys.argv[sys.argv.index('-sp') + 1]
else:
saf_output_patterns_file_name= package.generated_files_folder + "/" + "SAFpatterns.txt"
def check_if_sufficient(function_dict, function_id_1, function_id_2, list_patterns, debug, verbose):
or_op = "0"*package.data_width
if debug:
print "\t--------------------"
print "\tchecking if sufficient number of ones reached!"
print "\t\tline\top1\t\top2\t\tfunc_"+str(function_id_1)+" \t\t func_"+str(function_id_2)+"\t\txor(1,2)\tand(1,xor)\tor(prev_or,and)"
print "\t\t"+"------------------------------------------"*3
for i in list_patterns:
xor_op = format(int(function_dict[i][function_id_1], 2) ^ int(function_dict[i][function_id_2], 2), 'b').zfill(package.data_width)
and_op = format(int(function_dict[i][function_id_2], 2) & int(xor_op, 2), 'b').zfill(package.data_width)
or_op = format(int(or_op, 2) | int(and_op, 2), 'b').zfill(package.data_width)
if debug:
print "\t\t"+str(i)+"\t", function_dict[i][0],"\t", function_dict[i][1],"\t", function_dict[i][function_id_1], "\t", function_dict[i][function_id_2], "\t",str(xor_op).zfill(package.data_width), "\t"+str(and_op)+ "\t"+str(or_op)
if or_op == "1"*package.data_width:
if verbose:
print "\tbingo! all ones!"
return or_op
else:
if debug and verbose:
print "\tdidnt reach all ones!"
return or_op
input_file_name, verbose, debug, output_table_file_name, output_patterns_file_name, scanning_table_file_name, redundant_function_reduction = package.parse_program_arg(sys.argv, package.generated_files_folder)
data_width = package.data_width
print "data_width:", data_width
start_time = time.time()
function_dict = copy.deepcopy(package.parse_input_pattern_file(input_file_name))
len_of_list = len(function_dict[function_dict.keys()[0]])
number_of_lines = len(function_dict.keys())
try:
table_file = open(output_table_file_name, 'w')
scanning_table_file = open(scanning_table_file_name, 'w')
test_patterns_file = open(output_patterns_file_name, 'w')
saf_test_patterns_file = open(saf_output_patterns_file_name, 'w')
except IOError:
print "Could not open input pattern file, test pattern file, conformity or scanning table file!"
sys.exit()
if package.test_subset:
function_list = []
for item in package.test_only_list:
function_list.append(item+1)
else:
function_list = range(2, len_of_list)
package.make_table_header(table_file, function_list)
package.make_table_header(scanning_table_file, function_list)
number_of_ones_in_experiments = 0
number_of_zeros_in_experiments = 0
used_dic = {}
final_set_of_patterns = []
overal_test_length = 0
for func_id_1 in function_list:
current_set_of_patterns = []
string = '%10s' %("f_"+str(func_id_1-1)+"|") # -1 to march the number of functions for readability
scanning_string = '%10s' %("f_"+str(func_id_1-1)+"|") # -1 to march the number of functions for readability
scanning_test_f1 = "0"*data_width
for func_id_2 in function_list:
if func_id_1 != func_id_2:
scanning_test_f1_f2 = "0"*data_width
list_of_used_patterns = range(1, number_of_lines+1)
if verbose:
print "------------------------------------------"*3
print "function 1: ", func_id_1-1, "function 2:", func_id_2-1
print "------------------------------------------"*3
counter = 0
list_of_excluded_patterns = copy.deepcopy(current_set_of_patterns)
break_the_loop = False
best_solution = []
best_value = 0
sufficient = check_if_sufficient(function_dict, func_id_1, func_id_2, list_of_excluded_patterns, debug, verbose)
while(counter < number_of_lines):
list_of_ones_in_ands = package.find_most_signifacant_conformity(function_dict, func_id_1, func_id_2, list_of_used_patterns,
list_of_excluded_patterns, sufficient, debug, verbose)
if len(list_of_ones_in_ands.keys())>0:
if verbose:
print "\tmax number of ones:", max(list_of_ones_in_ands.keys())
if max(list_of_ones_in_ands.keys()) == 0:
break
list_of_best_patterns = list_of_ones_in_ands[max(list_of_ones_in_ands.keys())]
if verbose:
print "\tbest patterns in this round:", list_of_best_patterns
for item in list_of_best_patterns:
if type(item) == int:
item = [item]
if verbose:
print "\t----------------------"
print "\ttrying combination: ", list_of_excluded_patterns+list(item)
sufficient = check_if_sufficient(function_dict, func_id_1, func_id_2, list_of_excluded_patterns+list(item), debug, verbose)
if sufficient.count("1") == len(sufficient):
best_solution = copy.deepcopy(list_of_excluded_patterns+list(item))
if verbose:
print "\twe got it!"
break_the_loop = True
break
else:
if verbose:
print "\tnew number of ones :", sufficient.count("1"), "\t\tprevious value:", best_value
if sufficient.count("1") > best_value:
if verbose:
print "\tfound a better solution!"
list_of_excluded_patterns += list(item)
best_solution = copy.deepcopy(list_of_excluded_patterns)
best_value = sufficient.count("1")
break
if break_the_loop:
break
if break_the_loop:
break
counter += 1
else:
break
if verbose:
print "\t------------------------------------------------------------------"
if verbose:
print "best conformity solution for func ", func_id_1-1, " and func ", func_id_2-1, ": ", sufficient, best_solution
if verbose:
print "------------------------------"
for final_pattern in best_solution:
if final_pattern not in current_set_of_patterns:
current_set_of_patterns.append(final_pattern)
string += "\t"+str(sufficient)
for scan_pattern in best_solution:
scanning_test_f1_f2 = format(int(scanning_test_f1_f2, 2) | int(function_dict[scan_pattern][func_id_1], 2), 'b').zfill(data_width)
if redundant_function_reduction:
if (str(func_id_1-1)+"_"+str(func_id_2-1) in package.related_functions.keys()):
number_of_zeros_in_experiments += sufficient.count("0") - package.related_functions[str(func_id_1-1)+"_"+str(func_id_2-1)].count("0")
elif (str(func_id_1-1)+"_*" in package.related_functions.keys()):
number_of_zeros_in_experiments += sufficient.count("0") - package.related_functions[str(func_id_1-1)+"_*"].count("0")
elif ("*_"+str(func_id_2-1) in package.related_functions.keys()):
number_of_zeros_in_experiments += sufficient.count("0") - package.related_functions["*_"+str(func_id_2-1)].count("0")
else:
number_of_zeros_in_experiments += sufficient.count("0")
else:
number_of_zeros_in_experiments += sufficient.count("0")
number_of_ones_in_experiments += sufficient.count("1")
used_dic['{0:03}'.format(func_id_1)+"_"+'{0:03}'.format(func_id_2)] = copy.deepcopy(current_set_of_patterns)
else:
scanning_test_f1_f2 = "0"*data_width
string += "\t"+"x"*data_width
scanning_test_f1 = format(int(scanning_test_f1, 2) | int(scanning_test_f1_f2, 2), 'b').zfill(data_width)
scanning_string += "\t"+str(scanning_test_f1_f2)
#-------------------------------------------------------------------------------
# This part fixes the scanning test results for the current function pair
#-------------------------------------------------------------------------------
scanning_test_f1, best_solution = package.run_scanning_optimization(scanning_test_f1, function_dict, func_id_1, debug, verbose, best_solution)
scanning_string += "\t"+str(scanning_test_f1)
scanning_table_file.write(scanning_string+"\n")
table_file.write(string+"\n")
for k in current_set_of_patterns:
if k not in final_set_of_patterns:
final_set_of_patterns.append(k)
opcode = "{0:04b}".format((func_id_1-2))
for j in current_set_of_patterns:
saf_test_patterns_file.write(function_dict[j][0]+function_dict[j][1]+opcode+"\n")
#overal_test_length += len(list_of_necessary_patterns)
print "reporting test length for functions:"
for func_id_1 in range(2, len_of_list):
    max_length = 0
    for item in used_dic.keys():
        if int(item.split("_")[0]) == func_id_1:
            if len(used_dic[item]) > max_length:
                max_length = len(used_dic[item])
    overal_test_length += max_length
    print "function id: ", func_id_1-1, "\ttest length:", max_length
stop_time = time.time()
final_unused_patterns = copy.deepcopy(package.final_un_used_pattern(number_of_lines, final_set_of_patterns))
for item in sorted(final_set_of_patterns):
test_patterns_file.write(str(function_dict[item][0])+""+str(function_dict[item][1])+"\n")
# reports!
package.report_usefull_patterns_per_round(used_dic, len_of_list)
print "overal test length:", overal_test_length
package.print_results(final_set_of_patterns, final_unused_patterns, verbose)
package.print_fault_coverage(number_of_lines, number_of_ones_in_experiments, number_of_zeros_in_experiments)
print "------------------------------------------"*3
print "program took ", str(stop_time-start_time), "seconds"
# closing all files
table_file.close()
scanning_table_file.close()
test_patterns_file.close()
saf_test_patterns_file.close()
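# --- Illustrative sketch of the conformity check (editor's addition) ---
# check_if_sufficient() above ORs together AND(func_2, XOR(func_1, func_2)) over the
# selected patterns and declares success once every bit position holds a '1'. The
# helper below reproduces that bit-level accumulation on plain binary strings for a
# 4-bit data width; it is an explanatory sketch, not part of the original flow.
def _conformity_coverage(func_1_outputs, func_2_outputs, width=4):
    or_op = "0" * width
    for out_1, out_2 in zip(func_1_outputs, func_2_outputs):
        xor_op = format(int(out_1, 2) ^ int(out_2, 2), 'b').zfill(width)
        and_op = format(int(out_2, 2) & int(xor_op, 2), 'b').zfill(width)
        or_op = format(int(or_op, 2) | int(and_op, 2), 'b').zfill(width)
    return or_op
# Example: _conformity_coverage(["0000", "0000"], ["1100", "0011"]) returns "1111"
# (full coverage), while swapping the argument order returns "0000" because func_2
# never contributes a 1 to the AND term.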
|
gpl-3.0
| -8,961,026,154,048,714,000
| 41.09292
| 230
| 0.644276
| false
| 2.910064
| true
| false
| false
|
jreback/pandas
|
pandas/core/indexes/datetimelike.py
|
1
|
28697
|
"""
Base and utility classes for tseries type pandas objects.
"""
from datetime import datetime
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, TypeVar, Union, cast
import numpy as np
from pandas._libs import NaT, Timedelta, iNaT, join as libjoin, lib
from pandas._libs.tslibs import BaseOffset, Resolution, Tick
from pandas._typing import Callable, Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical_dtype,
is_dtype_equal,
is_integer,
is_list_like,
is_period_dtype,
is_scalar,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.extension import (
NDArrayBackedExtensionIndex,
inherit_names,
make_wrapped_arith_op,
)
from pandas.core.indexes.numeric import Int64Index
from pandas.core.tools.timedeltas import to_timedelta
if TYPE_CHECKING:
from pandas import CategoricalIndex
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
def _join_i8_wrapper(joinf, with_indexers: bool = True):
"""
Create the join wrapper methods.
"""
# error: 'staticmethod' used with a non-method
@staticmethod # type: ignore[misc]
def wrapper(left, right):
# Note: these only get called with left.dtype == right.dtype
if isinstance(
left, (np.ndarray, DatetimeIndexOpsMixin, ABCSeries, DatetimeLikeArrayMixin)
):
left = left.view("i8")
if isinstance(
right,
(np.ndarray, DatetimeIndexOpsMixin, ABCSeries, DatetimeLikeArrayMixin),
):
right = right.view("i8")
results = joinf(left, right)
if with_indexers:
# dtype should be timedelta64[ns] for TimedeltaIndex
# and datetime64[ns] for DatetimeIndex
dtype = cast(np.dtype, left.dtype).base
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
@inherit_names(
["inferred_freq", "_resolution_obj", "resolution"],
DatetimeLikeArrayMixin,
cache=True,
)
@inherit_names(["mean", "asi8", "freq", "freqstr"], DatetimeLikeArrayMixin)
class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex):
"""
Common ops mixin to support a unified interface datetimelike Index.
"""
_can_hold_strings = False
_data: Union[DatetimeArray, TimedeltaArray, PeriodArray]
_data_cls: Union[Type[DatetimeArray], Type[TimedeltaArray], Type[PeriodArray]]
freq: Optional[BaseOffset]
freqstr: Optional[str]
_resolution_obj: Resolution
_bool_ops: List[str] = []
_field_ops: List[str] = []
# error: "Callable[[Any], Any]" has no attribute "fget"
hasnans = cache_readonly(
DatetimeLikeArrayMixin._hasnans.fget # type: ignore[attr-defined]
)
_hasnans = hasnans # for index / array -agnostic code
@classmethod
def _simple_new(
cls,
values: Union[DatetimeArray, TimedeltaArray, PeriodArray],
name: Label = None,
):
assert isinstance(values, cls._data_cls), type(values)
result = object.__new__(cls)
result._data = values
result._name = name
result._cache = {}
# For groupby perf. See note in indexes/base about _index_data
result._index_data = values._data
result._reset_identity()
return result
@property
def _is_all_dates(self) -> bool:
return True
# ------------------------------------------------------------------------
# Abstract data attributes
@property
def values(self) -> np.ndarray:
# Note: PeriodArray overrides this to return an ndarray of objects.
return self._data._data
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc and other functions.
"""
result = lib.item_from_zerodim(result)
if is_bool_dtype(result) or lib.is_scalar(result):
return result
attrs = self._get_attributes_dict()
if not is_period_dtype(self.dtype) and attrs["freq"]:
# no need to infer if freq is None
attrs["freq"] = "infer"
return type(self)(result, **attrs)
# ------------------------------------------------------------------------
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
elif other.dtype.kind in ["f", "i", "u", "c"]:
return False
elif not isinstance(other, type(self)):
should_try = False
inferable = self._data._infer_matches
if other.dtype == object:
should_try = other.inferred_type in inferable
elif is_categorical_dtype(other.dtype):
other = cast("CategoricalIndex", other)
should_try = other.categories.inferred_type in inferable
if should_try:
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
# e.g.
# ValueError -> cannot parse str entry, or OutOfBoundsDatetime
# TypeError -> trying to convert IntervalIndex to DatetimeIndex
# OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
return np.array_equal(self.asi8, other.asi8)
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
res = self.get_loc(key)
except (KeyError, TypeError, ValueError):
return False
return bool(
is_scalar(res) or isinstance(res, slice) or (is_list_like(res) and len(res))
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take((), kwargs)
indices = np.asarray(indices, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
result = NDArrayBackedExtensionIndex.take(
self, indices, axis, allow_fill, fill_value, **kwargs
)
if isinstance(maybe_slice, slice):
freq = self._data._get_getitem_freq(maybe_slice)
result._data._freq = freq
return result
_can_hold_na = True
_na_value = NaT
"""The expected NA value to use with this index."""
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
return tolerance
def tolist(self) -> List:
"""
Return a list of the underlying data.
"""
return list(self.astype(object))
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
if len(i8) and self.is_monotonic_increasing:
# quick check
if i8[0] != iNaT:
return self._data._box_func(i8[0])
if self.hasnans:
if not skipna:
return self._na_value
i8 = i8[~self._isnan]
if not len(i8):
return self._na_value
min_stamp = i8.min()
return self._data._box_func(min_stamp)
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo("int64").max
return i8.argmin()
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
if len(i8) and self.is_monotonic:
# quick check
if i8[-1] != iNaT:
return self._data._box_func(i8[-1])
if self.hasnans:
if not skipna:
return self._na_value
i8 = i8[~self._isnan]
if not len(i8):
return self._na_value
max_stamp = i8.max()
return self._data._box_func(max_stamp)
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
# --------------------------------------------------------------------
# Rendering Methods
def format(
self,
name: bool = False,
formatter: Optional[Callable] = None,
na_rep: str = "NaT",
date_format: Optional[str] = None,
) -> List[str]:
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(
ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, na_rep=na_rep, date_format=date_format)
def _format_with_header(
self, header: List[str], na_rep: str = "NaT", date_format: Optional[str] = None
) -> List[str]:
return header + list(
self._format_native_types(na_rep=na_rep, date_format=date_format)
)
@property
def _formatter_func(self):
return self._data._formatter()
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
if attrib == "freq":
freq = self.freqstr
if freq is not None:
freq = repr(freq)
attrs.append(("freq", freq))
return attrs
def _summary(self, name=None) -> str:
"""
Return a summarized representation.
Parameters
----------
name : str
Name to use in the summary representation.
Returns
-------
str
Summarized representation of the index.
"""
formatter = self._formatter_func
if len(self) > 0:
index_summary = f", {formatter(self[0])} to {formatter(self[-1])}"
else:
index_summary = ""
if name is None:
name = type(self).__name__
result = f"{name}: {len(self)} entries{index_summary}"
if self.freq:
result += f"\nFreq: {self.freqstr}"
# display as values, not quoted
result = result.replace("'", "")
return result
# --------------------------------------------------------------------
# Indexing Methods
def _validate_partial_date_slice(self, reso: Resolution):
raise NotImplementedError
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
raise NotImplementedError
def _partial_date_slice(
self,
reso: Resolution,
parsed: datetime,
):
"""
Parameters
----------
reso : Resolution
parsed : datetime
Returns
-------
slice or ndarray[intp]
"""
self._validate_partial_date_slice(reso)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
vals = self._data._ndarray
unbox = self._data._unbox
if self.is_monotonic_increasing:
if len(self) and (
(t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
):
# we are out of range
raise KeyError
# TODO: does this depend on being monotonic _increasing_?
# a monotonic (sorted) series can be sliced
left = vals.searchsorted(unbox(t1), side="left")
right = vals.searchsorted(unbox(t2), side="right")
return slice(left, right)
else:
lhs_mask = vals >= unbox(t1)
rhs_mask = vals <= unbox(t2)
# try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
# --------------------------------------------------------------------
# Arithmetic Methods
__add__ = make_wrapped_arith_op("__add__")
__sub__ = make_wrapped_arith_op("__sub__")
__radd__ = make_wrapped_arith_op("__radd__")
__rsub__ = make_wrapped_arith_op("__rsub__")
__pow__ = make_wrapped_arith_op("__pow__")
__rpow__ = make_wrapped_arith_op("__rpow__")
__mul__ = make_wrapped_arith_op("__mul__")
__rmul__ = make_wrapped_arith_op("__rmul__")
__floordiv__ = make_wrapped_arith_op("__floordiv__")
__rfloordiv__ = make_wrapped_arith_op("__rfloordiv__")
__mod__ = make_wrapped_arith_op("__mod__")
__rmod__ = make_wrapped_arith_op("__rmod__")
__divmod__ = make_wrapped_arith_op("__divmod__")
__rdivmod__ = make_wrapped_arith_op("__rdivmod__")
__truediv__ = make_wrapped_arith_op("__truediv__")
__rtruediv__ = make_wrapped_arith_op("__rtruediv__")
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
arr = self._data.view()
arr._freq = self.freq
result = arr._time_shift(periods, freq=freq)
return type(self)(result, name=self.name)
# --------------------------------------------------------------------
# List-like Methods
def _get_delete_freq(self, loc: int):
"""
Find the `freq` for self.delete(loc).
"""
freq = None
if is_period_dtype(self.dtype):
freq = self.freq
elif self.freq is not None:
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
np.asarray(loc, dtype=np.intp), len(self)
)
if isinstance(loc, slice) and loc.step in (1, None):
if loc.start in (0, None) or loc.stop in (len(self), None):
freq = self.freq
return freq
def _get_insert_freq(self, loc, item):
"""
Find the `freq` for self.insert(loc, item).
"""
value = self._data._validate_scalar(item)
item = self._data._box_func(value)
freq = None
if is_period_dtype(self.dtype):
freq = self.freq
elif self.freq is not None:
# freq can be preserved on edge cases
if self.size:
if item is NaT:
pass
elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
else:
# Adding a single item to an empty index may preserve freq
if self.freq.is_on_offset(item):
freq = self.freq
return freq
@doc(NDArrayBackedExtensionIndex.delete)
def delete(self, loc):
result = super().delete(loc)
result._data._freq = self._get_delete_freq(loc)
return result
@doc(NDArrayBackedExtensionIndex.insert)
def insert(self, loc: int, item):
result = super().insert(loc, item)
result._data._freq = self._get_insert_freq(loc, item)
return result
# --------------------------------------------------------------------
# Join/Set Methods
def _get_join_freq(self, other):
"""
Get the freq to attach to the result of a join operation.
"""
if is_period_dtype(self.dtype):
freq = self.freq
else:
self = cast(DatetimeTimedeltaMixin, self)
freq = self.freq if self._can_fast_union(other) else None
return freq
def _wrap_joined_index(self, joined: np.ndarray, other):
assert other.dtype == self.dtype, (other.dtype, self.dtype)
result = super()._wrap_joined_index(joined, other)
result._data._freq = self._get_join_freq(other)
return result
@doc(Index._convert_arr_indexer)
def _convert_arr_indexer(self, keyarr):
try:
return self._data._validate_listlike(keyarr, allow_object=True)
except (ValueError, TypeError):
return com.asarray_tuplesafe(keyarr)
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin):
"""
Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
but not PeriodIndex
"""
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique
def _with_freq(self, freq):
arr = self._data._with_freq(freq)
return type(self)._simple_new(arr, name=self.name)
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return False
def is_type_compatible(self, kind: str) -> bool:
return kind in self._data._infer_matches
# --------------------------------------------------------------------
# Set Operation Methods
@Appender(Index.difference.__doc__)
def difference(self, other, sort=None):
new_idx = super().difference(other, sort=sort)._with_freq(None)
return new_idx
def _intersection(self, other: Index, sort=False) -> Index:
"""
intersection specialized to the case with matching dtypes.
"""
other = cast("DatetimeTimedeltaMixin", other)
if len(self) == 0:
return self.copy()._get_reconciled_name_object(other)
if len(other) == 0:
return other.copy()._get_reconciled_name_object(self)
elif not self._can_fast_intersect(other):
result = Index._intersection(self, other, sort=sort)
# We need to invalidate the freq because Index._intersection
# uses _shallow_copy on a view of self._data, which will preserve
# self.freq if we're not careful.
result = self._wrap_setop_result(other, result)
return result._with_freq(None)._with_freq("infer")
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
# after sorting, the intersection always starts with the right index
# and ends with the index of which the last elements is smallest
end = min(left[-1], right[-1])
start = right[0]
if end < start:
result = self[:0]
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left._values[lslice]
# error: Argument 1 to "_simple_new" of "DatetimeIndexOpsMixin" has
# incompatible type "Union[ExtensionArray, Any]"; expected
# "Union[DatetimeArray, TimedeltaArray, PeriodArray]" [arg-type]
result = type(self)._simple_new(left_chunk) # type: ignore[arg-type]
return self._wrap_setop_result(other, result)
def _can_fast_intersect(self: _T, other: _T) -> bool:
if self.freq is None:
return False
elif other.freq != self.freq:
return False
elif not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
return False
elif self.freq.is_anchored():
# this along with matching freqs ensure that we "line up",
# so intersection will preserve freq
return True
elif not len(self) or not len(other):
return False
elif isinstance(self.freq, Tick):
# We "line up" if and only if the difference between two of our points
# is a multiple of our freq
diff = self[0] - other[0]
remainder = diff % self.freq.delta
return remainder == Timedelta(0)
return True
def _can_fast_union(self: _T, other: _T) -> bool:
# Assumes that type(self) == type(other), as per the annotation
# The ability to fast_union also implies that `freq` should be
# retained on union.
if not isinstance(other, type(self)):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
# TODO: do union on the reversed indexes?
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other, sort=None):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
elif sort is False:
# TDIs are not in the "correct" order and we don't want
# to sort but want to remove overlaps
left, right = self, other
left_start = left[0]
loc = right.searchsorted(left_start, side="left")
right_chunk = right._values[:loc]
dates = concat_compat((left._values, right_chunk))
# With sort being False, we can't infer that result.freq == self.freq
# TODO: no tests rely on the _with_freq("infer"); needed?
result = self._shallow_copy(dates)._with_freq("infer")
return result
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side="right")
right_chunk = right._values[loc:]
dates = concat_compat([left._values, right_chunk])
# The can_fast_union check ensures that the result.freq
# should match self.freq
dates = type(self._data)(dates, freq=self.freq)
result = type(self)._simple_new(dates)
return result
else:
return left
def _union(self, other, sort):
# We are called by `union`, which is responsible for this validation
assert isinstance(other, type(self))
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
result = this._fast_union(other, sort=sort)
if sort is None:
# In the case where sort is None, _can_fast_union
# implies that result.freq should match self.freq
assert result.freq == self.freq, (result.freq, self.freq)
elif result.freq is None:
# TODO: no tests rely on this; needed?
result = result._with_freq("infer")
return result
else:
i8self = Int64Index._simple_new(self.asi8)
i8other = Int64Index._simple_new(other.asi8)
i8result = i8self._union(i8other, sort=sort)
result = type(self)(i8result, dtype=self.dtype, freq="infer")
return result
# --------------------------------------------------------------------
# Join Methods
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique, with_indexers=False
)
def join(
self, other, how: str = "left", level=None, return_indexers=False, sort=False
):
"""
See Index.join
"""
pself, pother = self._maybe_promote(other)
if pself is not self or pother is not other:
return pself.join(
pother, how=how, level=level, return_indexers=return_indexers, sort=sort
)
this, other = self._maybe_utc_convert(other)
return Index.join(
this,
other,
how=how,
level=level,
return_indexers=return_indexers,
sort=sort,
)
def _maybe_utc_convert(self: _T, other: Index) -> Tuple[_T, Index]:
# Overridden by DatetimeIndex
return self, other
# --------------------------------------------------------------------
# List-Like Methods
@Appender(DatetimeIndexOpsMixin.insert.__doc__)
def insert(self, loc, item):
if isinstance(item, str):
# TODO: Why are strings special?
# TODO: Should we attempt _scalar_from_string?
return self.astype(object).insert(loc, item)
return DatetimeIndexOpsMixin.insert(self, loc, item)
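# --- Illustrative sketch (editor's addition, not part of pandas) ---
# Shows, through the public DatetimeIndex API, the freq-preservation rules that
# _get_delete_freq/_get_insert_freq implement above. Exact behaviour can differ
# between pandas releases; this is an example, not a test.
def _example_freq_preservation():
    import pandas as pd

    idx = pd.date_range("2021-01-01", periods=4, freq="D")
    assert idx.delete(0).freq is not None      # deleting an edge point keeps freq
    assert idx.delete(2).freq is None          # deleting an interior point drops freq
    appended = idx.insert(len(idx), pd.Timestamp("2021-01-05"))
    assert appended.freq is not None           # appending the next grid point keeps freq
    return appended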
|
bsd-3-clause
| -3,249,803,881,970,340,400
| 32.023015
| 88
| 0.552671
| false
| 4.106611
| false
| false
| false
|
ironfroggy/django-mailer
|
mailer/__init__.py
|
1
|
4161
|
VERSION = (0, 2, 0, "dev", 1)
def get_version():
if VERSION[3] == "final":
return "%s.%s.%s" % (VERSION[0], VERSION[1], VERSION[2])
elif VERSION[3] == "dev":
return "%s.%s.%s%s%s" % (VERSION[0], VERSION[1], VERSION[2], VERSION[3], VERSION[4])
else:
return "%s.%s.%s%s" % (VERSION[0], VERSION[1], VERSION[2], VERSION[3])
__version__ = get_version()
PRIORITY_MAPPING = {
"high": "1",
"medium": "2",
"low": "3",
"deferred": "4",
}
# replacement for django.core.mail.send_mail
def send_mail(subject, message, from_email, recipient_list, priority="medium",
fail_silently=False, auth_user=None, auth_password=None, headers=None):
from django.utils.encoding import force_unicode
from mailer.models import Message
priority = PRIORITY_MAPPING[priority]
# need to do this in case subject used lazy version of ugettext
subject = force_unicode(subject)
message = force_unicode(message)
if len(subject) > 100:
subject = u"%s..." % subject[:97]
for to_address in recipient_list:
message_obj = Message.objects.create(
to_address=to_address,
from_address=from_email,
subject=subject,
message_body=message,
priority=priority)
if headers:
for name, value in headers.items():
message_obj.headers[name] = value
message_obj.save()
def send_html_mail(subject, message, message_html, from_email, recipient_list,
priority="medium", fail_silently=False, auth_user=None,
auth_password=None, headers=None):
"""
Function to queue HTML e-mails
"""
from django.utils.encoding import force_unicode
from mailer.models import Message
priority = PRIORITY_MAPPING[priority]
# need to do this in case subject used lazy version of ugettext
subject = force_unicode(subject)
for to_address in recipient_list:
message_obj = Message.objects.create(to_address=to_address,
from_address=from_email,
subject=subject,
message_body=message,
message_body_html=message_html,
priority=priority)
if headers:
for name, value in headers.items():
message_obj.headers[name] = value
message_obj.save()
def mail_admins(subject, message, fail_silently=False, priority="medium", headers=None):
from django.utils.encoding import force_unicode
from django.conf import settings
from mailer.models import Message
priority = PRIORITY_MAPPING[priority]
subject = settings.EMAIL_SUBJECT_PREFIX + force_unicode(subject)
message = force_unicode(message)
if len(subject) > 100:
subject = u"%s..." % subject[:97]
for name, to_address in settings.ADMINS:
message_obj = Message.objects.create(to_address=to_address,
from_address=settings.SERVER_EMAIL,
subject=subject,
message_body=message,
priority=priority)
if headers:
for name, value in headers.items():
message_obj.headers[name] = value
message_obj.save()
def mail_managers(subject, message, fail_silently=False, priority="medium", headers=None):
from django.utils.encoding import force_unicode
from django.conf import settings
from mailer.models import Message
priority = PRIORITY_MAPPING[priority]
subject = settings.EMAIL_SUBJECT_PREFIX + force_unicode(subject)
message = force_unicode(message)
if len(subject) > 100:
subject = u"%s..." % subject[:97]
for name, to_address in settings.MANAGERS:
message_obj = Message.objects.create(to_address=to_address,
from_address=settings.SERVER_EMAIL,
subject=subject,
message_body=message,
priority=priority)
if headers:
for name, value in headers.items():
message_obj.headers[name] = value
message_obj.save()
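# --- Illustrative usage sketch (editor's addition) ---
# send_mail() above only queues one Message row per recipient; nothing is delivered
# until the queue is flushed (django-mailer ships management commands for that --
# the exact command names are an assumption about the surrounding project).
def _example_queue_mail():
    send_mail(
        subject="Welcome",
        message="Plain-text body",
        from_email="noreply@example.com",
        recipient_list=["user@example.com"],
        priority="high",  # stored as priority "1" via PRIORITY_MAPPING
    )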
|
mit
| -1,367,007,737,760,832,500
| 32.02381
| 92
| 0.604903
| false
| 4.051607
| false
| false
| false
|
neilLasrado/erpnext
|
erpnext/controllers/queries.py
|
1
|
19700
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import erpnext
from frappe.desk.reportview import get_match_cond, get_filters_cond
from frappe.utils import nowdate, getdate
from collections import defaultdict
from erpnext.stock.get_item_details import _get_item_tax_template
from frappe.utils import unique
# searches for active employees
def employee_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
fields = get_fields("Employee", ["name", "employee_name"])
return frappe.db.sql("""select {fields} from `tabEmployee`
where status = 'Active'
and docstatus < 2
and ({key} like %(txt)s
or employee_name like %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, employee_name), locate(%(_txt)s, employee_name), 99999),
idx desc,
name, employee_name
limit %(start)s, %(page_len)s""".format(**{
'fields': ", ".join(fields),
'key': searchfield,
'fcond': get_filters_cond(doctype, filters, conditions),
'mcond': get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for leads which are not converted
def lead_query(doctype, txt, searchfield, start, page_len, filters):
fields = get_fields("Lead", ["name", "lead_name", "company_name"])
return frappe.db.sql("""select {fields} from `tabLead`
where docstatus < 2
and ifnull(status, '') != 'Converted'
and ({key} like %(txt)s
or lead_name like %(txt)s
or company_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, lead_name), locate(%(_txt)s, lead_name), 99999),
if(locate(%(_txt)s, company_name), locate(%(_txt)s, company_name), 99999),
idx desc,
name, lead_name
limit %(start)s, %(page_len)s""".format(**{
'fields': ", ".join(fields),
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for customer
def customer_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
cust_master_name = frappe.defaults.get_user_default("cust_master_name")
fields = ["name", "customer_group", "territory"]
if not cust_master_name == "Customer Name":
fields.append("customer_name")
fields = get_fields("Customer", fields)
searchfields = frappe.get_meta("Customer").get_search_fields()
searchfields = " or ".join([field + " like %(txt)s" for field in searchfields])
return frappe.db.sql("""select {fields} from `tabCustomer`
where docstatus < 2
and ({scond}) and disabled=0
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),
idx desc,
name, customer_name
limit %(start)s, %(page_len)s""".format(**{
"fields": ", ".join(fields),
"scond": searchfields,
"mcond": get_match_cond(doctype),
"fcond": get_filters_cond(doctype, filters, conditions).replace('%', '%%'),
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for supplier
def supplier_query(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
fields = ["name", "supplier_group"]
if not supp_master_name == "Supplier Name":
fields.append("supplier_name")
fields = get_fields("Supplier", fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s) and disabled=0
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
idx desc,
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': ', '.join(fields),
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
def tax_account_query(doctype, txt, searchfield, start, page_len, filters):
company_currency = erpnext.get_company_currency(filters.get('company'))
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2
and account_type in (%s)
and is_group = 0
and company = %s
and account_currency = %s
and `%s` LIKE %s
order by idx desc, name
limit %s, %s""" %
(", ".join(['%s']*len(filters.get("account_type"))), "%s", "%s", searchfield, "%s", "%s", "%s"),
tuple(filters.get("account_type") + [filters.get("company"), company_currency, "%%%s%%" % txt,
start, page_len]))
if not tax_accounts:
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2 and is_group = 0
and company = %s and account_currency = %s and `%s` LIKE %s limit %s, %s""" #nosec
% ("%s", "%s", searchfield, "%s", "%s", "%s"),
(filters.get("company"), company_currency, "%%%s%%" % txt, start, page_len))
return tax_accounts
def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False):
conditions = []
#Get searchfields from meta and use in Item Link field query
meta = frappe.get_meta("Item", cached=True)
searchfields = meta.get_search_fields()
if "description" in searchfields:
searchfields.remove("description")
searchfields = searchfields + [field for field in[searchfield or "name", "item_code", "item_group", "item_name"]
if not field in searchfields]
searchfields = " or ".join([field + " like %(txt)s" for field in searchfields])
description_cond = ''
if frappe.db.count('Item', cache=True) < 50000:
# scan description only if items are less than 50000
description_cond = 'or tabItem.description LIKE %(txt)s'
fields = get_fields("Item", ["name", "item_group"])
if "description" in fields:
fields.remove("description")
return frappe.db.sql("""select
{fields},
if(length(tabItem.description) > 40, \
concat(substr(tabItem.description, 1, 40), "..."), description) as description
from tabItem
where tabItem.docstatus < 2
and tabItem.has_variants=0
and tabItem.disabled=0
and (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00')
and ({scond} or tabItem.item_code IN (select parent from `tabItem Barcode` where barcode LIKE %(txt)s)
{description_cond})
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999),
idx desc,
name, item_name
limit %(start)s, %(page_len)s """.format(
fields=', '.join(fields),
key=searchfield,
scond=searchfields,
fcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'),
mcond=get_match_cond(doctype).replace('%', '%%'),
description_cond = description_cond),
{
"today": nowdate(),
"txt": "%%%s%%" % txt,
"_txt": txt.replace("%", ""),
"start": start,
"page_len": page_len
}, as_dict=as_dict)
def bom(doctype, txt, searchfield, start, page_len, filters):
conditions = []
fields = get_fields("BOM", ["name", "item"])
return frappe.db.sql("""select {fields}
from tabBOM
where tabBOM.docstatus=1
and tabBOM.is_active=1
and tabBOM.`{key}` like %(txt)s
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
idx desc, name
limit %(start)s, %(page_len)s """.format(
fields=", ".join(fields),
fcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'),
mcond=get_match_cond(doctype).replace('%', '%%'),
key=searchfield),
{
'txt': '%' + txt + '%',
'_txt': txt.replace("%", ""),
'start': start or 0,
'page_len': page_len or 20
})
def get_project_name(doctype, txt, searchfield, start, page_len, filters):
cond = ''
if filters.get('customer'):
cond = """(`tabProject`.customer = %s or
ifnull(`tabProject`.customer,"")="") and""" %(frappe.db.escape(filters.get("customer")))
fields = get_fields("Project", ["name"])
return frappe.db.sql("""select {fields} from `tabProject`
where `tabProject`.status not in ("Completed", "Cancelled")
and {cond} `tabProject`.name like %(txt)s {match_cond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
idx desc,
`tabProject`.name asc
limit {start}, {page_len}""".format(
fields=", ".join(['`tabProject`.{0}'.format(f) for f in fields]),
cond=cond,
match_cond=get_match_cond(doctype),
start=start,
page_len=page_len), {
"txt": "%{0}%".format(txt),
"_txt": txt.replace('%', '')
})
def get_delivery_notes_to_be_billed(doctype, txt, searchfield, start, page_len, filters, as_dict):
fields = get_fields("Delivery Note", ["name", "customer", "posting_date"])
return frappe.db.sql("""
select %(fields)s
from `tabDelivery Note`
where `tabDelivery Note`.`%(key)s` like %(txt)s and
`tabDelivery Note`.docstatus = 1
and status not in ("Stopped", "Closed") %(fcond)s
and (
(`tabDelivery Note`.is_return = 0 and `tabDelivery Note`.per_billed < 100)
or `tabDelivery Note`.grand_total = 0
or (
`tabDelivery Note`.is_return = 1
and return_against in (select name from `tabDelivery Note` where per_billed < 100)
)
)
%(mcond)s order by `tabDelivery Note`.`%(key)s` asc limit %(start)s, %(page_len)s
""" % {
"fields": ", ".join(["`tabDelivery Note`.{0}".format(f) for f in fields]),
"key": searchfield,
"fcond": get_filters_cond(doctype, filters, []),
"mcond": get_match_cond(doctype),
"start": start,
"page_len": page_len,
"txt": "%(txt)s"
}, {"txt": ("%%%s%%" % txt)}, as_dict=as_dict)
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
cond = ""
if filters.get("posting_date"):
cond = "and (batch.expiry_date is null or batch.expiry_date >= %(posting_date)s)"
batch_nos = None
args = {
'item_code': filters.get("item_code"),
'warehouse': filters.get("warehouse"),
'posting_date': filters.get('posting_date'),
'txt': "%{0}%".format(txt),
"start": start,
"page_len": page_len
}
having_clause = "having sum(sle.actual_qty) > 0"
if filters.get("is_return"):
having_clause = ""
if args.get('warehouse'):
batch_nos = frappe.db.sql("""select sle.batch_no, round(sum(sle.actual_qty),2), sle.stock_uom,
concat('MFG-',batch.manufacturing_date), concat('EXP-',batch.expiry_date)
from `tabStock Ledger Entry` sle
INNER JOIN `tabBatch` batch on sle.batch_no = batch.name
where
batch.disabled = 0
and sle.item_code = %(item_code)s
and sle.warehouse = %(warehouse)s
and (sle.batch_no like %(txt)s
or batch.expiry_date like %(txt)s
or batch.manufacturing_date like %(txt)s)
and batch.docstatus < 2
{cond}
{match_conditions}
group by batch_no {having_clause}
order by batch.expiry_date, sle.batch_no desc
limit %(start)s, %(page_len)s""".format(
cond=cond,
match_conditions=get_match_cond(doctype),
having_clause = having_clause
), args)
return batch_nos
else:
return frappe.db.sql("""select name, concat('MFG-', manufacturing_date), concat('EXP-',expiry_date) from `tabBatch` batch
where batch.disabled = 0
and item = %(item_code)s
and (name like %(txt)s
or expiry_date like %(txt)s
or manufacturing_date like %(txt)s)
and docstatus < 2
{0}
{match_conditions}
order by expiry_date, name desc
limit %(start)s, %(page_len)s""".format(cond, match_conditions=get_match_cond(doctype)), args)
def get_account_list(doctype, txt, searchfield, start, page_len, filters):
filter_list = []
if isinstance(filters, dict):
for key, val in filters.items():
if isinstance(val, (list, tuple)):
filter_list.append([doctype, key, val[0], val[1]])
else:
filter_list.append([doctype, key, "=", val])
elif isinstance(filters, list):
filter_list.extend(filters)
if "is_group" not in [d[1] for d in filter_list]:
filter_list.append(["Account", "is_group", "=", "0"])
if searchfield and txt:
filter_list.append([doctype, searchfield, "like", "%%%s%%" % txt])
return frappe.desk.reportview.execute("Account", filters = filter_list,
fields = ["name", "parent_account"],
limit_start=start, limit_page_length=page_len, as_list=True)
def get_blanket_orders(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select distinct bo.name, bo.blanket_order_type, bo.to_date
from `tabBlanket Order` bo, `tabBlanket Order Item` boi
where
boi.parent = bo.name
and boi.item_code = {item_code}
and bo.blanket_order_type = '{blanket_order_type}'
and bo.company = {company}
and bo.docstatus = 1"""
.format(item_code = frappe.db.escape(filters.get("item")),
blanket_order_type = filters.get("blanket_order_type"),
company = frappe.db.escape(filters.get("company"))
))
@frappe.whitelist()
def get_income_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
# income account can be any Credit account,
	# but can also be an Asset account with account_type='Income Account' in special circumstances.
# Hence the first condition is an "OR"
if not filters: filters = {}
condition = ""
if filters.get("company"):
condition += "and tabAccount.company = %(company)s"
fields = get_fields("Account", ["name"])
return frappe.db.sql("""select {fields} from `tabAccount`
where (tabAccount.report_type = "Profit and Loss"
or tabAccount.account_type in ("Income Account", "Temporary"))
and tabAccount.is_group=0
and tabAccount.`{key}` LIKE %(txt)s
{condition} {match_condition}
order by idx desc, name"""
.format(
fields=", ".join(fields),
condition=condition,
match_condition=get_match_cond(doctype),
key=searchfield
), {
'txt': '%' + txt + '%',
'company': filters.get("company", "")
})
@frappe.whitelist()
def get_expense_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
if not filters: filters = {}
condition = ""
if filters.get("company"):
condition += "and tabAccount.company = %(company)s"
fields = get_fields("Account", ["name"])
return frappe.db.sql("""select {fields}, tabAccount.name from `tabAccount`
where (tabAccount.report_type = "Profit and Loss"
or tabAccount.account_type in ("Expense Account", "Fixed Asset", "Temporary", "Asset Received But Not Billed", "Capital Work in Progress"))
and tabAccount.is_group=0
and tabAccount.docstatus!=2
and tabAccount.{key} LIKE %(txt)s
{condition} {match_condition}"""
.format(
fields=", ".join(['`tabAccount`.{0}'.format(f) for f in fields]),
condition=condition,
key=searchfield,
match_condition=get_match_cond(doctype)
), {
'company': filters.get("company", ""),
'txt': '%' + txt + '%'
})
@frappe.whitelist()
def warehouse_query(doctype, txt, searchfield, start, page_len, filters):
# Should be used when item code is passed in filters.
conditions, bin_conditions = [], []
filter_dict = get_doctype_wise_filters(filters)
sub_query = """ select round(`tabBin`.actual_qty, 2) from `tabBin`
where `tabBin`.warehouse = `tabWarehouse`.name
{bin_conditions} """.format(
bin_conditions=get_filters_cond(doctype, filter_dict.get("Bin"),
bin_conditions, ignore_permissions=True))
fields = get_fields("Warehouse", ["name"])
query = """select {fields},
CONCAT_WS(" : ", "Actual Qty", ifnull( ({sub_query}), 0) ) as actual_qty
from `tabWarehouse`
where
`tabWarehouse`.`{key}` like {txt}
{fcond} {mcond}
order by
`tabWarehouse`.name desc
limit
{start}, {page_len}
""".format(
fields=", ".join(['`tabWarehouse`.{0}'.format(f) for f in fields]),
sub_query=sub_query,
key=searchfield,
fcond=get_filters_cond(doctype, filter_dict.get("Warehouse"), conditions),
mcond=get_match_cond(doctype),
start=start,
page_len=page_len,
txt=frappe.db.escape('%{0}%'.format(txt))
)
return frappe.db.sql(query)
def get_doctype_wise_filters(filters):
	# Helper function to separate filters doctype-wise
filter_dict = defaultdict(list)
for row in filters:
filter_dict[row[0]].append(row)
return filter_dict
@frappe.whitelist()
def get_batch_numbers(doctype, txt, searchfield, start, page_len, filters):
	fields = get_fields("Batch", ["batch_id"])
	# Field names cannot be bound as SQL parameters, so interpolate them via format();
	# txt and item are still passed through frappe.db.sql's parameter binding.
	query = """select {fields} from `tabBatch`
		where disabled = 0
		and (expiry_date >= CURDATE() or expiry_date IS NULL)
		and name like %(txt)s""".format(fields=", ".join(fields))
	values = {
		"txt": "%{0}%".format(txt)
	}
	if filters and filters.get('item'):
		query += " and item = %(item)s"
		# dicts have no append(); assign the key and let parameter binding handle quoting
		values["item"] = filters.get('item')
	return frappe.db.sql(query, values)
@frappe.whitelist()
def item_manufacturer_query(doctype, txt, searchfield, start, page_len, filters):
item_filters = [
['manufacturer', 'like', '%' + txt + '%'],
['item_code', '=', filters.get("item_code")]
]
fields = get_fields("Item Manufacturer", ["manufacturer", "manufacturer_part_no"])
item_manufacturers = frappe.get_all(
"Item Manufacturer",
fields=fields,
filters=item_filters,
limit_start=start,
limit_page_length=page_len,
as_list=1
)
return item_manufacturers
@frappe.whitelist()
def get_purchase_receipts(doctype, txt, searchfield, start, page_len, filters):
fields = get_fields("Purchase Receipt", ["name"])
item_filters = [
['Purchase Receipt', 'docstatus', '=', '1'],
['Purchase Receipt', 'name', 'like', '%' + txt + '%'],
['Purchase Receipt Item', 'item_code', '=', filters.get("item_code")]
]
purchase_receipts = frappe.get_all('Purchase Receipt',
fields=fields,
filters=item_filters,
as_list=1
)
return purchase_receipts
@frappe.whitelist()
def get_purchase_invoices(doctype, txt, searchfield, start, page_len, filters):
fields = get_fields("Purchase Invoice", ["name"])
item_filters =[
['Purchase Invoice', 'docstatus', '=', '1'],
['Purchase Invoice', 'name', 'like', '%' + txt + '%'],
['Purchase Invoice Item', 'item_code', '=', filters.get("item_code")],
]
purchase_invoices = frappe.get_all('Purchase Invoice',
fields=fields,
filters=item_filters,
as_list=1
)
return purchase_invoices
@frappe.whitelist()
def get_tax_template(doctype, txt, searchfield, start, page_len, filters):
item_doc = frappe.get_cached_doc('Item', filters.get('item_code'))
item_group = filters.get('item_group')
taxes = item_doc.taxes or []
while item_group:
item_group_doc = frappe.get_cached_doc('Item Group', item_group)
taxes += item_group_doc.taxes or []
item_group = item_group_doc.parent_item_group
if not taxes:
fields = get_fields("Item Tax Template", ["name"])
return frappe.db.sql(""" SELECT %(fields)s FROM `tabItem Tax Template` """ , {fields: ", ".join(fields)})
else:
args = {
'item_code': filters.get('item_code'),
'posting_date': filters.get('valid_from'),
'tax_category': filters.get('tax_category')
}
taxes = _get_item_tax_template(args, taxes, for_validate=True)
return [(d,) for d in set(taxes)]
def get_fields(doctype, fields=None):
	# use a fresh list per call to avoid the shared mutable default argument
	fields = list(fields) if fields else []
	meta = frappe.get_meta(doctype)
	fields.extend(meta.get_search_fields())
	if meta.title_field and meta.title_field.strip() not in fields:
		fields.insert(1, meta.title_field.strip())
	return unique(fields)
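# Illustrative call (not part of the original module; the argument values are assumed):
#   get_batch_numbers("Batch", "BATCH-000", "name", 0, 20, {"item": "ITEM-0001"})
# would return the enabled, unexpired batch rows matching the text, with the columns
# coming from get_fields("Batch", ["batch_id"]).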
|
gpl-3.0
| -5,845,624,524,436,555,000
| 30.980519
| 143
| 0.651421
| false
| 2.983041
| false
| false
| false
|
OctavianLee/Pywechat
|
pywechat/services/wechat_shake.py
|
1
|
20297
|
# -*- coding: utf-8 -*-
from pywechat.services.basic import Basic
class ShakeService(Basic):
"""This class is an implement of the Wechat service of shaking.
All request's urls come from the official documents.
Link: https://mp.weixin.qq.com/wiki/home/index.html
"""
def bind_page(
self,
page_ids, bind, append,
device_id=None, uuid=None, major=None, minor=None):
"""Binds the relations ship between the device and pages.
Link:
https://mp.weixin.qq.com/wiki/12/c8120214ec0ba08af5dfcc0da1a11400.html
Args:
page_ids: the list of page_id.
            bind: the flag of the binding operation.
                0 dismisses the relationship.
                1 builds the relationship.
            append: the flag of the appending operation.
                0 replaces the existing pages.
                1 appends the pages.
device_id: the device id,
                it can be None when uuid, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_ids": page_ids,
"bind": bind,
"append": append
}
if device_id:
data["device_identifier"] = {
"device_id": device_id
}
else:
data["device_identifier"] = {
"uuid": uuid,
"major": major,
"minor": minor
}
url = 'https://api.weixin.qq.com/shakearound/device/bindpage'
json_data = self._send_request('post', url, data=data)
return json_data
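    # Illustrative usage (not part of the original module; the constructor arguments
    # are assumed, since they come from the Basic base class):
    #   service = ShakeService(app_id='APPID', app_secret='APPSECRET')
    #   service.bind_page([12345], bind=1, append=0, device_id=10011)
    #   # or address the device by its beacon triplet instead of device_id:
    #   service.bind_page([12345], bind=1, append=0,
    #                     uuid='FDA50693-A4E2-4FB1-AFCF-C6EB07647825',
    #                     major=10001, minor=10002)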
def upload_material(self, image):
"""Uploads the material for the icon of page.
Formats: jpg, jpeg, png, gif. Size: better 120*120, limit 200*200 px
Link:
https://mp.weixin.qq.com/wiki/5/e997428269ff189d8f9a4b9e177be2d9.html
Args:
image: the file of image. open(image_name, 'rb')
Returns:
the json data.Example:
{
"data": {
"pic_url":
"http://shp.qpic.cn/wechat_shakearound_pic/0/1428377032/120"
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
url = 'https://api.weixin.qq.com/shakearound/material/add'
files = {'media': image}
json_data = self._send_request('post', url, files=files)
return json_data
def apply_devices(
self,
quantity, apply_reason, comment,
poi_id=None):
"""Applys devices from the wechat.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
            quantity: the quantity of devices (less than 500).
            apply_reason: the reason for applying (less than 100 characters).
            comment: the comment (less than 15 characters or 30 letters).
            poi_id: the id of the point of interest.
Returns:
the json data.Example:
{
"data": {
"apply_id": 123,
"device_identifiers":[
{
"device_id":10100,
"uuid":"FDA50693-A4E2-4FB1-AFCF-C6EB07647825",
"major":10001,
"minor":10002
}
]
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
url = 'https://api.weixin.qq.com/shakearound/device/applyid'
data = {
"quantity": quantity,
"apply_reason": apply_reason,
"comment": comment
}
if poi_id:
data["poi_id"] = poi_id
json_data = self._send_request('post', url, data=data)
return json_data
def update_device(
self,
comment,
device_id=None, uuid=None, major=None, minor=None):
"""Edit the comment of a device.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
            comment: the comment (less than 15 characters or 30 letters).
device_id: the device id,
                it can be None when uuid, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"comment": comment,
}
if device_id:
data["device_identifier"] = {
"device_id": device_id
}
else:
data["device_identifier"] = {
"uuid": uuid,
"major": major,
"minor": minor
}
url = 'https://api.weixin.qq.com/shakearound/device/update'
json_data = self._send_request('post', url, data=data)
return json_data
def bind_location(
self,
poi_id,
device_id=None, uuid=None, major=None, minor=None):
"""Bind the device with a location.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
            poi_id: the id of the point of interest.
device_id: the device id,
                it can be None when uuid, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"poi_id": poi_id
}
if device_id:
data["device_identifier"] = {
"device_id": device_id
}
else:
data["device_identifier"] = {
"uuid": uuid,
"major": major,
"minor": minor
}
url = 'https://api.weixin.qq.com/shakearound/device/bindlocation'
json_data = self._send_request('post', url, data=data)
return json_data
def search_device(
self,
device_id=None, uuid=None, major=None, minor=None):
"""Finds the information of a device.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
device_id: the device id,
                it can be None when uuid, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": {
"devices": [
{
"comment": "",
"device_id": 10097,
"major": 10001,
"minor": 12102,
"page_ids": "15369",
"status": 1,
"poi_id": 0,
"uuid": "FDA50693-A4E2-4FB1-AFCF-C6EB07647825"
}
],
"total_count": 1
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
device_identifier = {}
if device_id:
device_identifier = {
"device_id": device_id
}
else:
device_identifier = {
"uuid": uuid,
"major": major,
"minor": minor
}
data = {
"device_identifiers": [device_identifier]
}
url = 'https://api.weixin.qq.com/shakearound/device/search'
json_data = self._send_request('post', url, data=data)
return json_data
def search_devices(
self,
begin, count,
apply_id=None):
"""Finds the information of devices.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
begin: the start number of devices.
            count: the number of devices to query.
            apply_id: the application number of the devices.
Returns:
the json data.Example:
{
"data": {
"devices": [
{
"comment": "",
"device_id": 10097,
"major": 10001,
"minor": 12102,
"page_ids": "15369",
"status": 1,
"poi_id": 0,
"uuid": "FDA50693-A4E2-4FB1-AFCF-C6EB07647825"
}
],
"total_count": 1
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"begin": begin,
"count": count
}
if apply_id:
data["apply_id"] = apply_id
url = 'https://api.weixin.qq.com/shakearound/device/search'
json_data = self._send_request('post', url, data=data)
return json_data
def add_page(
self,
title, description, page_url, icon_url,
comment=None):
"""Adds the new page.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
            title: the page title (less than 6 characters).
            description: the subtitle (less than 7 characters).
            page_url: the url of the page.
            icon_url: the url of the icon.
            comment: the comment (less than 15 characters).
Returns:
the json data.Example:
{
"data": {
"page_id": 28840
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"title": title,
"description": description,
"page_url": page_url,
"icon_url": icon_url
}
if comment:
data["comment"] = comment
url = 'https://api.weixin.qq.com/shakearound/page/add'
json_data = self._send_request('post', url, data=data)
return json_data
def update_page(
self,
page_id, title, description, page_url, icon_url,
comment=None):
"""Edits a page.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
page_id: the id of page.
            title: the page title (less than 6 characters).
            description: the subtitle (less than 7 characters).
            page_url: the url of the page.
            icon_url: the url of the icon.
            comment: the comment (less than 15 characters).
Returns:
the json data.Example:
{
"data": {
"page_id": 28840
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_id": page_id,
"title": title,
"description": description,
"page_url": page_url,
"icon_url": icon_url
}
if comment:
data["comment"] = comment
url = 'https://api.weixin.qq.com/shakearound/page/update'
json_data = self._send_request('post', url, data=data)
return json_data
def search_page_by_ids(self, page_ids):
"""Finds pages by ids.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
page_ids: the list of page id.
Returns:
the json data.Example:
{
"data": {
"pages": [
{
"comment": "just for test",
"description": "test",
"icon_url": "https://www.baidu.com/img/bd_logo1",
"page_id": 28840,
"page_url": "http://xw.qq.com/testapi1",
"title": "测试1"
}
],
"total_count": 1
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_ids": page_ids,
}
url = 'https://api.weixin.qq.com/shakearound/page/search'
json_data = self._send_request('post', url, data=data)
return json_data
def search_page_by_counts(self, begin, count):
"""Finds pages by counts.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
begin: the start number of pages.
            count: the number of pages to query.
Returns:
the json data.Example:
{
"data": {
"pages": [
{
"comment": "just for test",
"description": "test",
"icon_url": "https://www.baidu.com/img/bd_logo1",
"page_id": 28840,
"page_url": "http://xw.qq.com/testapi1",
"title": "测试1"
}
],
"total_count": 1
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"begin": begin,
"count": count
}
url = 'https://api.weixin.qq.com/shakearound/page/search'
json_data = self._send_request('post', url, data=data)
return json_data
def delete_page(self, page_ids):
"""Deletes pages by ids.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
page_ids: the list of page id.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_ids": page_ids,
}
url = 'https://api.weixin.qq.com/shakearound/page/delete'
json_data = self._send_request('post', url, data=data)
return json_data
def get_shake_info(self, ticket, need_poi=None):
"""Gets the informaiton of shaking.
Gets the information of devices including UUID, major, minor etc.
Link:
https://mp.weixin.qq.com/wiki/3/34904a5db3d0ec7bb5306335b8da1faf.html
Args:
            ticket: the business ticket, which can be obtained from the url.
            need_poi: whether to return poi_id.
                1 is to return it.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"ticket": ticket
}
if need_poi:
data["need_poi"] = need_poi
url = 'https://api.weixin.qq.com/shakearound/user/getshakeinfo'
json_data = self._send_request('post', url, data=data)
return json_data
def device_statistics(
self,
begin_date, end_date,
device_id=None, uuid=None, major=None, minor=None):
"""Gets the statistics of a device.
Link:
https://mp.weixin.qq.com/wiki/0/8a24bcacad40fe7ee98d1573cb8a6764.html
Args:
begin_date: the timestamp of start date
end_date: the timestamp of end date, the max time span is 30 days.
device_id: the device id,
                it can be None when uuid, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": [
{
"click_pv": 0,
"click_uv": 0,
"ftime": 1425052800,
"shake_pv": 0,
"shake_uv": 0
}
],
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"begin_date": begin_date,
"end_date": end_date
}
if device_id:
data["device_identifier"] = {
"device_id": device_id
}
else:
data["device_identifier"] = {
"uuid": uuid,
"major": major,
"minor": minor
}
url = 'https://api.weixin.qq.com/shakearound/statistics/device'
json_data = self._send_request('post', url, data=data)
return json_data
def page_statistics(self, page_id, begin_date, end_date):
"""Finds the information of a page.
(Link:
https://mp.weixin.qq.com/wiki/0/8a24bcacad40fe7ee98d1573cb8a6764.html)
Args:
begin_date: the timestamp of start date
end_date: the timestamp of end date, the max time span is 30 days.
page_id: the id of page.
Returns:
the json data.Example:
{
"data": [
{
"click_pv": 0,
"click_uv": 0,
"ftime": 1425052800,
"shake_pv": 0,
"shake_uv": 0
}
],
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_id": page_id,
"begin_date": begin_date,
"end_date": end_date
}
url = 'https://api.weixin.qq.com/shakearound/statistics/page'
json_data = self._send_request('post', url, data=data)
return json_data
|
mit
| 5,705,444,596,172,750,000
| 28.836765
| 80
| 0.452708
| false
| 4.232165
| false
| false
| false
|
iwalz/zendserverapi
|
docs/conf.py
|
1
|
7828
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Zend Server API documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 24 01:33:37 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
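# For example (illustrative only; this project keeps the list empty):
# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']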
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Zend Server API'
copyright = '2012, Ingo Walz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.2'
# The full version, including alpha/beta/rc tags.
release = '0.0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZendServerAPIdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '\usepackage[plainpages=false]',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ZendServerAPI.tex', 'Zend Server API Documentation',
'Ingo Walz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zendserverapi', 'Zend Server API Documentation',
['Ingo Walz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ZendServerAPI', 'Zend Server API Documentation',
'Ingo Walz', 'ZendServerAPI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
bsd-3-clause
| -1,641,784,278,740,480,300
| 31.213992
| 80
| 0.70465
| false
| 3.761653
| true
| false
| false
|
rcanepa/cs-fundamentals
|
python/interview_questions/longest_repeated_substring.py
|
1
|
2128
|
"""Longest repeated substring (or LCP = longest common prefix in a suffix array).
Problem: find the longest repeated substring inside a string.
Steps:
1. Create suffixes. This should be linear in time and space, but it isn't.
Slicing strings in Python (with slice or [a:b]) is a linear operation
with regard to the size of the string. In the end, this implementation
provides a quadratic time O(N^2).
2. Sort suffixes. This should be N * log(N) in time.
3. Find LCP between adjacent suffixes.
Usage:
    This script can be used by piping data to the standard input. Example:
cat ~/manifesto.txt | python3 -m interview_questions.longest_repeated_substring
"""
import sys
import time
def lcp(s1, s2):
"""Return the length of the longest common prefix
between strings `s1` and `s2`."""
comp = 0
for i in range(min(len(s1), len(s2))):
if s1[i] != s2[i]:
break
comp += 1
return comp
def lrs(text):
"""Return the longest repeated substring using a Suffix Array."""
# Step 1: create the suffixes array.
suffixes = []
    for i in range(len(text)):
        suffixes.append(text[i:])
# Step 2: sort the suffixes array.
sorted_suffixes = sorted(suffixes)
    # Step 3: find the longest repeated substring.
result = ""
for i in range(len(sorted_suffixes) - 1):
l = lcp(sorted_suffixes[i], sorted_suffixes[i + 1])
if l > len(result):
result = sorted_suffixes[i][:l]
return result
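# A small worked example (not part of the original script): for the input "banana"
# the sorted suffixes are ["a", "ana", "anana", "banana", "na", "nana"], the longest
# common prefix between adjacent entries is "ana" (shared by "ana" and "anana"),
# so lrs("banana") returns "ana".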
if __name__ == "__main__":
s = ""
t0 = time.time()
for line in sys.stdin:
s += line
t1 = time.time()
print("################################################################################")
print('-> Took {:.3f}ms to read the file.'.format((t1 - t0) * 1000))
t0 = time.time()
r = lrs(s)
t1 = time.time()
print('-> Took {:.3f}ms to find the longest repeated substring the file.'.format((t1 - t0) * 1000))
print("################################################################################")
print("The longest repeated substring is:")
print(r)
|
mit
| 4,694,552,034,413,392,000
| 30.761194
| 103
| 0.569549
| false
| 3.848101
| false
| false
| false
|
marckn/dimerizer
|
dimerizer/forcefield/collect/collectfromtopology.py
|
1
|
2636
|
import dimerizer.forcefield.basic_parsing_tools as parser
import basic_func as basic
def collect_tags(fname, atomlist):
"""
Collect the dimerized atomtypes.
fname is a topology filename, atomlist is
the list of atom INDICES (0 to N-1)
Returns:
tuple with two elements:
1) a list of tags with idx-tag correspondance
2) the list of dimerized tags without repetitions
"""
lfile=parser.filetolist(fname)
asec = parser.get_section(lfile,"atoms")[0]
tags=[]
dtags=[]
for ln in asec[1]:
prs=parser.parse_line(ln)
if prs[0] != "Data":
continue
serial= int(prs[1][0])
tag = prs[1][1]
tags.append(tag)
if serial-1 in atomlist:
dtags.append(tag)
dtags = list(set(dtags))
return (tags,dtags)
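# Illustrative example (not part of the original module; the tag values are assumed):
# for a topology whose [ atoms ] section lists the atom types OW, HW1, HW2 in that
# order, collect_tags(fname, atomlist=[0]) would return (['OW', 'HW1', 'HW2'], ['OW']),
# i.e. the full idx->tag list and the de-duplicated tags of the dimerized atoms.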
def lines_involved(fname,tags, atlist):
"""
For each interaction line return the tags involved by the dimerization.
Return a list of tuples, each tuple contains:
1 - the kind of interaction (angle, dihedral, ...)
2 - the list of tag combinations
Input:
the topology filename
the idx - tag correspondance
the list of atoms to be dimerized
"""
lfile=parser.filetolist(fname)
sec_bonds=parser.get_section(lfile,"bonds")
sec_pairs=parser.get_section(lfile,"pairs")
sec_angles=parser.get_section(lfile,"angles")
sec_dihedrals=parser.get_section(lfile,"(dihedrals|impropers)")
sec_cmap=parser.get_section(lfile,"cmap")
rval=[]
l1 = basic.ffentries(sec_bonds,tags,atlist,2)
if not l1 is None:
rval.append(l1)
l2 = basic.ffentries(sec_pairs,tags,atlist,2)
if not l2 is None:
rval.append(l2)
l3 = basic.ffentries(sec_angles,tags,atlist,3)
if not l3 is None:
rval.append(l3)
l4 = basic.ffentries(sec_dihedrals,tags,atlist,4)
if not l4 is None:
rval.append(l4)
l5 = basic.ffentries(sec_cmap,tags,atlist,5)
if not l5 is None:
rval.append(l5)
return rval
def dihedral_lines(fname,tags):
"""
For each dihedral interaction line return the tags.
Return a list of tuples, each tuple contains:
1 - the kind of interaction (angle, dihedral, ...) - for conformity
2 - the list of tag combinations
Input:
the topology filename
the idx - tag correspondance
"""
lfile=parser.filetolist(fname)
sec_dihedrals=parser.get_section(lfile,"(dihedrals|impropers)")
rval=[]
l4 = basic.ffentries(sec_dihedrals,tags,range(0,len(tags)),4)
if not l4 is None:
rval.append(l4)
return rval
|
gpl-3.0
| -1,301,878,778,461,048,800
| 22.963636
| 74
| 0.638088
| false
| 3.222494
| false
| false
| false
|
masschallenge/django-accelerator
|
accelerator_abstract/models/base_startup_mentor_relationship.py
|
1
|
1538
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from django.conf import settings
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
CONFIRMED_RELATIONSHIP = "Confirmed"
DESIRED_RELATIONSHIP = "Desired"
DISCUSSING_RELATIONSHIP = "In Discussions With"
RELATIONSHIP_CHOICES = ((CONFIRMED_RELATIONSHIP, CONFIRMED_RELATIONSHIP),
(DISCUSSING_RELATIONSHIP, DISCUSSING_RELATIONSHIP),
(DESIRED_RELATIONSHIP, DESIRED_RELATIONSHIP))
class BaseStartupMentorRelationship(AcceleratorModel):
startup_mentor_tracking = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label,
"StartupMentorTrackingRecord"),
on_delete=models.CASCADE)
mentor = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
status = models.CharField(
max_length=32,
choices=RELATIONSHIP_CHOICES,
default=DESIRED_RELATIONSHIP)
primary = models.BooleanField(default=False)
class Meta(AcceleratorModel.Meta):
db_table = 'accelerator_startupmentorrelationship'
abstract = True
verbose_name_plural = 'Startup Mentor Relationships'
def __str__(self):
name = "Relationship of %s to %s" % (
self.startup_mentor_tracking.startup.name,
self.mentor.get_profile().full_name()
)
return name
|
mit
| 2,679,707,739,714,140,000
| 34.767442
| 75
| 0.676853
| false
| 3.73301
| false
| false
| false
|
cogu/py-apx
|
util/apx_split.py
|
1
|
3475
|
#!/usr/bin/env python3
import os, sys
import apx
import argparse
def parse_lines_in_file(path):
"""
Parses text file path line by line and returns a list of names found in it.
The special character '#' can be used as a comment character and allows users to write line comments.
    Comments do not affect what this function returns.
"""
signals = []
with open(path) as fp:
for line in fp:
# removes all text comments starting with # character
parts = line.partition('#')
line = parts[0]
# removes starting and ending whitespace
line = line.strip()
if len(line) > 0:
signals.append(line)
return signals
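# Illustrative example (not part of the original script): given an input file containing
#   EngineSpeed    # rpm signal
#   VehicleSpeed
#   # a full-line comment
# parse_lines_in_file() returns ['EngineSpeed', 'VehicleSpeed'].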
def create_apx_node_from_file_name(file_name, default_name):
if file_name is None:
node_name = default_name
else:
node_name = os.path.basename(file_name)
if '.apx' in node_name:
node_name = os.path.splitext(node_name)[0]
return apx.Node(node_name)
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('input_file', help='The APX file to split (.apx)')
arg_parser.add_argument('port_names', nargs='*', help="Port names to be included in head")
arg_parser.add_argument('--head', help='APX File to write head result (.apx)', default=None)
arg_parser.add_argument('--tail', help='APX file to write tail result (.apx)', default=None)
arg_parser.add_argument('--head_name', help='Force new name of head APX node', default='Head')
    arg_parser.add_argument('--tail_name', help='Force new name of tail APX node', default='Tail')
arg_parser.add_argument('--file', help='Read port names from file instead', default=None)
    arg_parser.add_argument('--sort', help='Sort ports in the generated APX node(s)', action='store_true', default=False)
arg_parser.add_argument('--mirror', help='Forces output of head and tail to be mirrored', action='store_true', default=False)
args = arg_parser.parse_args()
if args.file is None and len(args.port_names)==0:
arg_parser.print_help()
sys.exit(1)
head_node = create_apx_node_from_file_name(args.head, args.head_name)
if args.tail is not None:
tail_node = create_apx_node_from_file_name(args.tail, args.tail_name)
else:
tail_node = None
source_node = apx.Parser().parse(args.input_file)
if args.file is not None:
port_names = parse_lines_in_file(args.file)
else:
port_names = args.port_names
processed = set()
for name in port_names:
source_port = source_node.find(name)
if (source_port is not None) and (source_port.name not in processed):
processed.add(source_port.name)
head_node.add_port_from_node(source_node, source_port)
if args.mirror:
head_node=head_node.mirror()
head_node.finalize(args.sort)
if args.head is not None:
head_node.save_apx(output_file=args.head, normalized=True)
else:
print(head_node.dumps(normalized=True))
if tail_node is not None:
if args.mirror:
tail_node=tail_node.mirror()
            tail_node.finalize(args.sort)
for source_port in source_node.providePorts+source_node.requirePorts:
if source_port.name not in processed:
tail_node.add_port_from_node(source_node, source_port)
tail_node.save_apx(output_file=args.tail, normalized=True)
|
mit
| -8,647,016,750,354,452,000
| 38.044944
| 129
| 0.641151
| false
| 3.564103
| false
| false
| false
|
energicryptocurrency/energi
|
qa/rpc-tests/rest.py
|
1
|
15355
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Energi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import http.client
import urllib.parse
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
#allows simple http get calls
def http_get_call(host, port, path, response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read().decode('utf-8')
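# Illustrative usage (not part of the original test; the host and port are assumed and
# would normally come from the node's url, as done in run_test below):
#   body = http_get_call('127.0.0.1', 18332, '/rest/chaininfo.json')
#   resp = http_get_call('127.0.0.1', 18332, '/rest/chaininfo.json', response_object=1)
#   resp.status  # 200 when the node exposes the REST interface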
#allows simple http post calls with a request body
def http_post_call(host, port, path, requestdata = '', response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('POST', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urllib.parse.urlparse(self.nodes[0].url)
print("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
######################################
# GETUTXOS: query a unspent outpoint #
######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
################################################
# GETUTXOS: now query a already spent outpoint #
################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
        #make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += hex_str_to_bytes(txid)
binaryRequest += pack("i", n)
binaryRequest += hex_str_to_bytes(vintx)
binaryRequest += pack("i", 0)
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = BytesIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #no outpoint is returned because the tx is only in the mempool and /checkmempool was not used
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because the tx has just been added to the mempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
        assert_equal(response.status, 400) #must be a 400 because we sent an invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
        assert_equal(response.status, 400) #must be a 400 because we sent an invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
        assert_equal(response.status, 400) #must be a 400 because we sent an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 400) #must be a 400 because we are exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 200) #must be a 200 because we are within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str, parse_float=Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj, True)
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
|
mit
| -6,753,828,709,250,829,000
| 44.428994
| 132
| 0.616346
| false
| 3.619755
| true
| false
| false
|
baykovr/rukovod
|
rukovod.py
|
1
|
6576
|
#!/usr/bin/env python
# Texas A&M University
# Department of Computer Science and Engineering
# Robert A. Baykov
import sys,datetime,time,csv,os,argparse,smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from rukovod_datatypes import Course,Section,Student
# TODO: gpg signing
USE_CRYPTO = False
# number of seconds to wait in between sending emails
TIME_DELAY = 1
# email from field, ie something@something.edu
EMAIL_FROM = 'REDACTED'
# email authentications, ie password for something@something.edu
EMAIL_AUTH = 'REDACTED'
# email subject
EMAIL_SUBJ = 'REDACTED'
# default, our load balanced smtp relay
SMTP_RELAY = "smtp-relay.tamu.edu"
SMTP_RELAY_PORT = 25
# -- from toolbox.py http://github.com/baykovr/toolbox
# -- Some common functions
def f_as_list(filename):
# Note: will strip out new line characters
# Return file contents as a list
# each line is a new item
try:
line_list = []
fp = open(filename)
for line in fp:
line = line.strip('\r\n')
line_list.append(line)
return line_list
except Exception, e:
		print '[ ! ] in f_as_list',e
return -1
def pipe_cmd(command):
try:
return os.popen(command).read()
except Exception as e:
print e
return -1
def cmd(cmd):
print 'Trying to exec:',cmd
try:
suppression = "&>/dev/null"
return os.system(cmd)
except Exception as e:
print e
return -1
def send_mail(fromaddr,toaddr,subject,body,username,password):
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(SMTP_RELAY, SMTP_RELAY_PORT)
server.ehlo()
server.starttls()
server.ehlo()
# smtp credentials
server.login(username, password)
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
# -- This parses the course file
# the course file holds some course meta information
# Line 1 is the course name
# subsequent lines after that are sections (multiple)
# COURSE CSCE-999
# SECTION 500-600 TR 08:00-09:10 HRBB-999 rosters/roster.csv
def init_course_file(course_file):
COURSE = None
info = f_as_list(course_file)
for line in info:
line = line.split(' ')
if line[0] == 'COURSE':
COURSE = Course(line[1])
elif line[0] == 'SECTION':
COURSE.add(Section(line[1],line[2],line[3],line[4],line[5]))
print 'Loaded '
print 'Course : ',COURSE.name
print 'Sections: ',
# Now you can do some actions, such as dump all emails
# for section in COURSE.sections:
# print '=== SECTION:',section.Number
# for username in section.students:
# #print username,
# print section.students[username].Email,','
return COURSE
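# Illustrative example (not part of the original script): for the course file shown
# above, the line
#   SECTION 500-600 TR 08:00-09:10 HRBB-999 rosters/roster.csv
# is split on spaces and yields
#   Section('500-600', 'TR', '08:00-09:10', 'HRBB-999', 'rosters/roster.csv')
# which init_course_file() appends to COURSE.sections via COURSE.add().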
# -- MAIL / CRYPTO --
def mail_unsigned_feedback(dst_addr,feedback):
print 'mailing'
print 'UNCOMMENT rukovod.py@110 to actually send.'
#send_mail(EMAIL_FROM,dst_addr,EMAIL_SUBJ,feedback,EMAIL_FROM,EMAIL_AUTH)
def mail_signed_feedback(dst_addr,feedback):
print 'mailing-signed'
#TODO GPG
# The generic gradebook file has arbitrary columns; the only requirement is that
# one column header contains 'email', which is used as the destination address.
def process_generic_grades_file(grades_file):
email_list = []
print '[...] FILE :',grades_file
print '[ ! ] WARNING: always double check email / roster records against this csv before mailing.'
ok = raw_input('[ ? ] continue (y/N):')
if ok.lower() != 'y':
print 'Exiting.'
return
try:
f = open(grades_file, 'rb')
reader = csv.reader(f)
header = ''
total_rows = 0
for row in reader:
if total_rows == 0:
header = row
# -- Header --
header_row_index = 0
for header_row in header:
if 'email' in header_row.lower():
email_dst_index = header_row_index
break
header_row_index+=1
# If no such column found offer debug and exit
if email_dst_index == -1:
print '\n[ ! ] could not locate an email address column'
nok = raw_input('[ ? ] show checked columns (y/N):')
if nok.lower() == 'y':
header_row_index=0
for header_row in header:
print '\t[',header_row_index,']',header_row
header_row_index+=1
print 'Check columns, Exiting.'
return
# -- /Header --
# -- Data Rows --
else:
# Construct Email Body
# Column : Data
# Column : Date
# etc ...
email_body = ''
email_dest = row[email_dst_index]
email_body += 'BEGIN-MESSAGE'+'*'*40 +'\n'
for i in range(0,len(header)):
email_body += header[i].ljust(12) + ' ' + row[i] + '\n'
email_body += 'END-MESSAGE'+'*'*42+'\n'
email_list.append( (email_dest,email_body) )
# -- /Data Rows --
total_rows+=1
# Check
if total_rows-1 == 0:
print '[ ! ] 0 rows found, nothing to do.'
print '[...] total entries extracted:',total_rows-1 # minus header
		print '[...] estimated time to send :',TIME_DELAY*(total_rows-1),'(seconds)'
if len(email_list) > 0:
ok = raw_input('[ ? ] preview first message (y/N)')
if ok.lower() == 'y':
print 'DESTINATION:',email_list[0][0]
print email_list[0][1]
ok = raw_input('\n[ ! ] SEND ALL MAIL (y/N)')
if ok.lower() == 'y':
# MAIL-AWAY
for email in email_list:
# Dump to stdout for record
print 'MAILING',datetime.datetime.now()
print 'DESTINATION:',email[0]
print email[1]
# Mail
if USE_CRYPTO == True:
mail_signed_feedback(email[0],email[1])
else:
mail_unsigned_feedback(email[0],email[1])
# Wait
time.sleep(TIME_DELAY)
else:
print 'Exiting.'
return
else:
print '[ ! ] no mail to send, exiting.'
except Exception as e:
print '[ ! ]',e
exit(1)
finally:
f.close()
if __name__ == "__main__":
try:
# TODO, PGP
pass
except Exception as e:
print '[ ! ]',e
ok = raw_input('[ ? ] continue without crypto (y/N):')
if ok.lower() == 'y':
USE_CRYPTO = False
else:
print 'Exiting.'
exit(0)
# Parse Args
parser = argparse.ArgumentParser(description='rukovod')
group = parser.add_mutually_exclusive_group()
group.add_argument('-c','--course',help='Course file',type=str)
group.add_argument('-g','--grades',help='Grades file',type=str)
arguments = parser.parse_args()
if arguments.course:
COURSE = init_course_file(arguments.course)
elif arguments.grades:
process_generic_grades_file(arguments.grades)
else:
print '-c / --course COURSE_FILE'
print '-g / --grades GRADES_FILE'
|
gpl-3.0
| 2,644,757,180,881,620,500
| 24.417671
| 99
| 0.621502
| false
| 2.994536
| false
| false
| false
|
bigswitch/nova
|
nova/tests/unit/api/openstack/compute/test_flavors.py
|
1
|
24823
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six.moves.urllib.parse as urlparse
import webob
from nova.api.openstack import common
from nova.api.openstack.compute import flavors as flavors_v21
import nova.compute.flavors
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"root_gb": '10',
"ephemeral_gb": '20',
"swap": '10',
"disabled": False,
"vcpus": '',
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"root_gb": '20',
"ephemeral_gb": '10',
"swap": '5',
"disabled": False,
"vcpus": '',
},
}
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
if marker in ['99999']:
raise exception.MarkerNotFound(marker)
def reject_min(db_attr, filter_attr):
return (filter_attr in filters and
int(flavor[db_attr]) < int(filters[filter_attr]))
filters = filters or {}
res = []
for (flavor_name, flavor) in FAKE_FLAVORS.items():
if reject_min('memory_mb', 'min_memory_mb'):
continue
elif reject_min('root_gb', 'min_root_gb'):
continue
res.append(flavor)
res = sorted(res, key=lambda item: item[sort_key])
output = []
marker_found = True if marker is None else False
for flavor in res:
if not marker_found and marker == flavor['flavorid']:
marker_found = True
elif marker_found:
if limit is None or len(output) < int(limit):
output.append(flavor)
return output
def fake_get_limit_and_marker(request, max_limit=1):
params = common.get_pagination_params(request)
limit = params.get('limit', max_limit)
limit = min(max_limit, limit)
marker = params.get('marker')
return limit, marker
def empty_get_all_flavors_sorted_list(context=None, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
return []
def return_flavor_not_found(flavor_id, ctxt=None):
raise exception.FlavorNotFound(flavor_id=flavor_id)
class FlavorsTestV21(test.TestCase):
_prefix = "/v2/fake"
Controller = flavors_v21.FlavorsController
fake_request = fakes.HTTPRequestV21
_rspv = "v2/fake"
_fake = "/fake"
def setUp(self):
super(FlavorsTestV21, self).setUp()
self.flags(osapi_compute_extension=[])
fakes.stub_out_networking(self)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
fake_get_all_flavors_sorted_list)
self.stubs.Set(nova.compute.flavors,
"get_flavor_by_flavor_id",
fake_flavor_get_by_flavor_id)
self.controller = self.Controller()
def _set_expected_body(self, expected, ephemeral, swap, disabled):
# NOTE(oomichi): On v2.1 API, some extensions of v2.0 are merged
# as core features and we can get the following parameters as the
# default.
expected['OS-FLV-EXT-DATA:ephemeral'] = ephemeral
expected['OS-FLV-DISABLED:disabled'] = disabled
expected['swap'] = swap
def test_get_flavor_by_invalid_id(self):
self.stubs.Set(nova.compute.flavors,
"get_flavor_by_flavor_id",
return_flavor_not_found)
req = self.fake_request.blank(self._prefix + '/flavors/asdf')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, 'asdf')
def test_get_flavor_by_id(self):
req = self.fake_request.blank(self._prefix + '/flavors/1')
flavor = self.controller.show(req, '1')
expected = {
"flavor": {
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
}
self._set_expected_body(expected['flavor'], ephemeral='20',
swap='10', disabled=False)
self.assertEqual(flavor, expected)
def test_get_flavor_with_custom_link_prefix(self):
self.flags(osapi_compute_link_prefix='http://zoo.com:42',
osapi_glance_link_prefix='http://circus.com:34')
req = self.fake_request.blank(self._prefix + '/flavors/1')
flavor = self.controller.show(req, '1')
expected = {
"flavor": {
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://zoo.com:42/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://zoo.com:42" + self._fake +
"/flavors/1",
},
],
},
}
self._set_expected_body(expected['flavor'], ephemeral='20',
swap='10', disabled=False)
self.assertEqual(expected, flavor)
def test_get_flavor_list(self):
req = self.fake_request.blank(self._prefix + '/flavors')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_with_marker(self):
self.maxDiff = None
url = self._prefix + '/flavors?limit=1&marker=1'
req = self.fake_request.blank(url)
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
'flavors_links': [
{'href': 'http://localhost/' + self._rspv +
'/flavors?limit=1&marker=2',
'rel': 'next'}
]
}
self.assertThat(flavor, matchers.DictMatches(expected))
def test_get_flavor_list_with_invalid_marker(self):
req = self.fake_request.blank(self._prefix + '/flavors?marker=99999')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_detail_with_limit(self):
url = self._prefix + '/flavors/detail?limit=1'
req = self.fake_request.blank(url)
response = self.controller.detail(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
]
self._set_expected_body(expected_flavors[0], ephemeral='20',
swap='10', disabled=False)
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/' + self._rspv + '/flavors/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['1'], 'marker': ['1']},
matchers.DictMatches(params))
def test_get_flavor_with_limit(self):
req = self.fake_request.blank(self._prefix + '/flavors?limit=2')
response = self.controller.index(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
}
]
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['2'], 'marker': ['2']},
matchers.DictMatches(params))
def test_get_flavor_with_default_limit(self):
self.stubs.Set(common, "get_limit_and_marker",
fake_get_limit_and_marker)
self.flags(osapi_max_limit=1)
req = fakes.HTTPRequest.blank('/v2/fake/flavors?limit=2')
response = self.controller.index(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/1",
}
]
}
]
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['2'], 'marker': ['1']},
matchers.DictMatches(params))
def test_get_flavor_list_detail(self):
req = self.fake_request.blank(self._prefix + '/flavors/detail')
flavor = self.controller.detail(req)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self._set_expected_body(expected['flavors'][0], ephemeral='20',
swap='10', disabled=False)
self._set_expected_body(expected['flavors'][1], ephemeral='10',
swap='5', disabled=False)
self.assertEqual(expected, flavor)
def test_get_empty_flavor_list(self):
self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
empty_get_all_flavors_sorted_list)
req = self.fake_request.blank(self._prefix + '/flavors')
flavors = self.controller.index(req)
expected = {'flavors': []}
self.assertEqual(flavors, expected)
def test_get_flavor_list_filter_min_ram(self):
# Flavor lists may be filtered by minRam.
req = self.fake_request.blank(self._prefix + '/flavors?minRam=512')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_ram(self):
# Ensure you cannot list flavors with invalid minRam param.
req = self.fake_request.blank(self._prefix + '/flavors?minRam=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_filter_min_disk(self):
# Flavor lists may be filtered by minDisk.
req = self.fake_request.blank(self._prefix + '/flavors?minDisk=20')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_disk(self):
# Ensure you cannot list flavors with invalid minDisk param.
req = self.fake_request.blank(self._prefix + '/flavors?minDisk=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_detail_min_ram_and_min_disk(self):
"""Tests that filtering work on flavor details and that minRam and
minDisk filters can be combined
"""
req = self.fake_request.blank(self._prefix + '/flavors/detail'
'?minRam=256&minDisk=20')
flavor = self.controller.detail(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self._set_expected_body(expected['flavors'][0], ephemeral='10',
swap='5', disabled=False)
self.assertEqual(expected, flavor)
class DisabledFlavorsWithRealDBTestV21(test.TestCase):
"""Tests that disabled flavors should not be shown nor listed."""
Controller = flavors_v21.FlavorsController
_prefix = "/v2"
fake_request = fakes.HTTPRequestV21
def setUp(self):
super(DisabledFlavorsWithRealDBTestV21, self).setUp()
# Add a new disabled type to the list of flavors
self.req = self.fake_request.blank(self._prefix + '/flavors')
self.context = self.req.environ['nova.context']
self.admin_context = context.get_admin_context()
self.disabled_type = self._create_disabled_instance_type()
self.addCleanup(self.disabled_type.destroy)
self.inst_types = objects.FlavorList.get_all(self.admin_context)
self.controller = self.Controller()
def _create_disabled_instance_type(self):
flavor = objects.Flavor(context=self.admin_context,
name='foo.disabled', flavorid='10.disabled',
memory_mb=512, vcpus=2, root_gb=1,
ephemeral_gb=0, swap=0, rxtx_factor=1.0,
vcpu_weight=1, disabled=True, is_public=True,
extra_specs={}, projects=[])
flavor.create()
return flavor
def test_index_should_not_list_disabled_flavors_to_user(self):
self.context.is_admin = False
flavor_list = self.controller.index(self.req)['flavors']
api_flavorids = set(f['id'] for f in flavor_list)
db_flavorids = set(i['flavorid'] for i in self.inst_types)
disabled_flavorid = str(self.disabled_type['flavorid'])
self.assertIn(disabled_flavorid, db_flavorids)
self.assertEqual(db_flavorids - set([disabled_flavorid]),
api_flavorids)
def test_index_should_list_disabled_flavors_to_admin(self):
self.context.is_admin = True
flavor_list = self.controller.index(self.req)['flavors']
api_flavorids = set(f['id'] for f in flavor_list)
db_flavorids = set(i['flavorid'] for i in self.inst_types)
disabled_flavorid = str(self.disabled_type['flavorid'])
self.assertIn(disabled_flavorid, db_flavorids)
self.assertEqual(db_flavorids, api_flavorids)
def test_show_should_include_disabled_flavor_for_user(self):
"""Counterintuitively we should show disabled flavors to all users and
not just admins. The reason is that, when a user performs a server-show
request, we want to be able to display the pretty flavor name ('512 MB
Instance') and not just the flavor-id even if the flavor id has been
marked disabled.
"""
self.context.is_admin = False
flavor = self.controller.show(
self.req, self.disabled_type['flavorid'])['flavor']
self.assertEqual(flavor['name'], self.disabled_type['name'])
def test_show_should_include_disabled_flavor_for_admin(self):
self.context.is_admin = True
flavor = self.controller.show(
self.req, self.disabled_type['flavorid'])['flavor']
self.assertEqual(flavor['name'], self.disabled_type['name'])
class ParseIsPublicTestV21(test.TestCase):
Controller = flavors_v21.FlavorsController
def setUp(self):
super(ParseIsPublicTestV21, self).setUp()
self.controller = self.Controller()
def assertPublic(self, expected, is_public):
self.assertIs(expected, self.controller._parse_is_public(is_public),
'%s did not return %s' % (is_public, expected))
def test_None(self):
self.assertPublic(True, None)
def test_truthy(self):
self.assertPublic(True, True)
self.assertPublic(True, 't')
self.assertPublic(True, 'true')
self.assertPublic(True, 'yes')
self.assertPublic(True, '1')
def test_falsey(self):
self.assertPublic(False, False)
self.assertPublic(False, 'f')
self.assertPublic(False, 'false')
self.assertPublic(False, 'no')
self.assertPublic(False, '0')
def test_string_none(self):
self.assertPublic(None, 'none')
self.assertPublic(None, 'None')
def test_other(self):
self.assertRaises(
webob.exc.HTTPBadRequest, self.assertPublic, None, 'other')
|
apache-2.0
| 774,215,699,162,965,000
| 35.666174
| 79
| 0.457237
| false
| 4.444584
| true
| false
| false
|
flux3dp/fluxghost
|
fluxghost/api/discover.py
|
1
|
4822
|
from time import time
import logging
import json
from fluxghost import g
logger = logging.getLogger("API.DISCOVER")
def get_online_message(source, device):
st = None
doc = {
"uuid": device.uuid.hex,
"alive": True,
"source": source,
"serial": device.serial,
"version": str(device.version),
"model": device.model_id,
}
if source == "lan":
doc.update({
"name": device.name,
"ipaddr": device.ipaddr,
"password": device.has_password,
})
st = device.status
elif source == "h2h":
st = device.device_status
doc.update({
"name": device.nickname,
"addr": device.addr,
})
else:
st = {}
doc.update({
"st_ts": st.get("st_ts"),
"st_id": st.get("st_id"),
"st_prog": st.get("st_prog"),
"head_module": st.get("st_head", st.get("head_module")),
"error_label": st.get("st_err", st.get("error_label"))
})
return doc
def get_offline_message(source, device=None, uuid=None):
return {
"uuid": device.uuid.hex if device else uuid.hex,
"alive": False,
"source": source
}
def discover_api_mixin(cls):
class DiscoverApi(cls):
def __init__(self, *args):
super().__init__(*args)
self.lan_alive_devices = set()
self.usb_alive_addr = {}
self.server.discover_devices.items()
self.POOL_TIME = 1.0
def review_lan_devices(self):
t = time()
with self.server.discover_mutex:
for uuid, device in self.server.discover_devices.items():
if t - device.last_update > 30:
# Dead devices
if uuid in self.lan_alive_devices:
self.lan_alive_devices.remove(uuid)
self.send_text(self.build_dead_response("lan",
device))
else:
# Alive devices
self.lan_alive_devices.add(uuid)
self.send_text(self.build_response("lan", device))
def review_usb_devices(self):
rmlist = []
for addr, uuid in self.usb_alive_addr.items():
usbprotocol = g.USBDEVS.get(addr)
if usbprotocol and usbprotocol.uuid == uuid:
pass
else:
rmlist.append(addr)
self.send_text(self.build_dead_response("h2h", uuid=uuid))
for addr in rmlist:
self.usb_alive_addr.pop(addr)
for addr, usbdevice in g.USBDEVS.items():
if addr not in self.usb_alive_addr:
self.usb_alive_addr[addr] = usbdevice.uuid
self.send_text(self.build_response("h2h", usbdevice))
def on_review_devices(self):
self.review_lan_devices()
self.review_usb_devices()
def on_text_message(self, message):
try:
payload = json.loads(message)
except Exception as e:
self.traceback("BAD_PARAMS")
return
cmd = payload.get("cmd")
if cmd == "poke":
try:
self.server.discover.poke(payload["ipaddr"])
except OSError as e:
pass
except Exception as e:
logger.error("Poke error: %s", repr(e))
elif cmd == "poketcp":
try:
self.server.discover.add_poketcp_ipaddr(payload["ipaddr"])
except OSError as e:
pass
except Exception as e:
logger.error("Poke TCP error: %s", repr(e))
elif cmd == 'testtcp':
print(payload["ipaddr"])
try:
self.server.discover.test_poketcp_ipaddr(payload["ipaddr"])
except OSError as e:
pass
except Exception as e:
logger.error("Test TCP error: %s", repr(e))
else:
self.send_error("L_UNKNOWN_COMMAND")
def on_loop(self):
self.on_review_devices()
self.POOL_TIME = min(self.POOL_TIME + 1.0, 3.0)
def on_closed(self):
pass
def build_dead_response(self, source, device=None, uuid=None):
return json.dumps(
get_offline_message(source, device=device, uuid=uuid))
def build_response(self, source, device):
return json.dumps(get_online_message(source, device))
return DiscoverApi
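# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal illustration of the message helpers defined above, assuming the fluxghost
# package is importable. `_FakeLanDevice` is a hypothetical stand-in for the device
# objects fluxghost normally passes in; only the attributes actually read by
# get_online_message("lan", ...) are stubbed out.
if __name__ == "__main__":
    import uuid as _uuid

    class _FakeLanDevice(object):
        uuid = _uuid.uuid4()
        serial = "F1XXXXXX"
        version = "1.0.0"
        model_id = "model-1"
        name = "demo-device"
        ipaddr = "192.0.2.10"
        has_password = False
        status = {"st_ts": 0, "st_id": 0, "st_prog": 0.0}

    print(get_online_message("lan", _FakeLanDevice()))
    print(get_offline_message("lan", uuid=_uuid.uuid4()))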
|
agpl-3.0
| 3,847,416,626,889,435,600
| 31.362416
| 79
| 0.48652
| false
| 4.20401
| false
| false
| false
|
rocky/python3-trepan
|
trepan/lib/printing.py
|
1
|
4182
|
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2010, 2015, 2020 Rocky Bernstein
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect, types
def print_dict(s, obj, title):
if hasattr(obj, "__dict__"):
obj = obj.__dict__
pass
if isinstance(obj, dict):
s += "\n%s:\n" % title
keys = list(obj.keys())
keys.sort()
for key in keys:
s += " %s:\t%s\n" % (repr(key), obj[key])
pass
pass
return s
def print_argspec(obj, obj_name):
"""A slightly decorated version of inspect.format_argspec"""
try:
return obj_name + inspect.formatargspec(*inspect.getargspec(obj))
except:
return None
return # Not reached
def print_obj(arg, frame, format=None, short=False):
"""Return a string representation of an object """
try:
if not frame:
# ?? Should we have set up a dummy globals
# to have persistence?
obj = eval(arg, None, None)
else:
obj = eval(arg, frame.f_globals, frame.f_locals)
pass
except:
return 'No symbol "' + arg + '" in current context.'
# format and print
what = arg
if format:
what = format + " " + arg
obj = printf(obj, format)
s = "%s = %s" % (what, obj)
if not short:
s += "\ntype = %s" % type(obj)
if callable(obj):
argspec = print_argspec(obj, arg)
if argspec:
s += ":\n\t"
if inspect.isclass(obj):
s += "Class constructor information:\n\t"
obj = obj.__init__
                # types.InstanceType exists only on Python 2 (old-style instances)
                elif hasattr(types, "InstanceType") and isinstance(obj, types.InstanceType):
obj = obj.__call__
pass
s += argspec
pass
# Try to list the members of a class.
# Not sure if this is correct or the
# best way to do.
s = print_dict(s, obj, "object variables")
if hasattr(obj, "__class__"):
s = print_dict(s, obj.__class__, "class variables")
pass
return s
pconvert = {"c": chr, "x": hex, "o": oct, "f": float, "s": str}
twos = (
"0000",
"0001",
"0010",
"0011",
"0100",
"0101",
"0110",
"0111",
"1000",
"1001",
"1010",
"1011",
"1100",
"1101",
"1110",
"1111",
)
def printf(val, fmt):
global pconvert, twos
if not fmt:
fmt = " " # not 't' nor in pconvert
# Strip leading '/'
if fmt[0] == "/":
fmt = fmt[1:]
f = fmt[0]
if f in pconvert.keys():
try:
return pconvert[f](val)
except:
return str(val)
# binary (t is from 'twos')
if f == "t":
try:
res = ""
while val:
res = twos[val & 0xF] + res
val = val >> 4
return res
except:
return str(val)
return str(val)
if __name__ == "__main__":
print(print_dict("", globals(), "my globals"))
print("-" * 40)
print(print_obj("print_obj", None))
print("-" * 30)
print(print_obj("Exception", None))
print("-" * 30)
print(print_argspec("Exception", None))
class Foo:
def __init__(self, bar=None):
pass
pass
print(print_obj("Foo.__init__", None))
print("-" * 30)
print(print_argspec(Foo.__init__, "__init__"))
assert printf(31, "/o") == "037"
assert printf(31, "/t") == "00011111"
assert printf(33, "/c") == "!"
assert printf(33, "/x") == "0x21"
|
gpl-3.0
| 5,689,020,719,061,596,000
| 25.980645
| 73
| 0.518173
| false
| 3.658793
| false
| false
| false
|
chennan47/OSF-Offline
|
start.py
|
1
|
1174
|
import sys
from PyQt5.QtWidgets import QApplication, QMessageBox, QSystemTrayIcon
from osfoffline import utils
from osfoffline.application.main import OSFApp
from osfoffline.database_manager.db import drop_db
def running_warning():
warn_app = QApplication(sys.argv)
QMessageBox.information(
None,
"Systray",
"OSF-Offline is already running. Check out the system tray."
)
warn_app.quit()
sys.exit(0)
def start():
# Start logging all events
if '--drop' in sys.argv:
drop_db()
utils.start_app_logging()
if sys.platform == 'win32':
from server import SingleInstance
single_app = SingleInstance()
if single_app.already_running():
running_warning()
app = QApplication(sys.argv)
if not QSystemTrayIcon.isSystemTrayAvailable():
QMessageBox.critical(
None,
"Systray",
"Could not detect a system tray on this system"
)
sys.exit(1)
QApplication.setQuitOnLastWindowClosed(False)
osf = OSFApp()
osf.start()
osf.hide()
sys.exit(app.exec_())
if __name__ == "__main__":
start()
|
apache-2.0
| 8,891,531,229,938,620,000
| 20.345455
| 70
| 0.626065
| false
| 3.738854
| false
| false
| false
|
onedata/cluster-example
|
bamboos/docker/package.py
|
1
|
3382
|
#!/usr/bin/env python
# coding=utf-8
"""Author: Tomasz Lichon
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Build packages in dockerized environment, as user 'package'
Run the script with -h flag to learn about script's running options.
"""
from os.path import expanduser
import argparse
import os
import sys
from environment import docker
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Run a command inside a dockerized development environment.')
parser.add_argument(
'-i', '--image',
action='store',
default='onedata/builder:v25',
help='docker image to use for building',
dest='image')
parser.add_argument(
'-s', '--src',
action='store',
default=os.getcwd(),
help='source directory to run command from',
dest='src')
parser.add_argument(
'-d', '--dst',
action='store',
default=None,
help='destination directory where the build will be stored; defaults '
'to source dir if unset',
dest='dst')
parser.add_argument(
'-k', '--keys',
action='store',
default=expanduser("~/.ssh"),
help='directory of ssh keys used for dependency fetching',
dest='keys')
parser.add_argument(
'-r', '--reflect-volume',
action='append',
default=[],
help="host's paths to reflect in container's filesystem",
dest='reflect')
parser.add_argument(
'-c', '--command',
action='store',
default='make',
help='command to run in the container',
dest='command')
parser.add_argument(
'-w', '--workdir',
action='store',
default=None,
help='path to the working directory; defaults to destination dir if unset',
dest='workdir')
parser.add_argument(
'-e', '--env',
action='append',
default=[],
help='env variables to set in the environment',
dest='envs')
[args, pass_args] = parser.parse_known_args()
destination = args.dst if args.dst else args.src
workdir = args.workdir if args.workdir else destination
command = '''
import os, shutil, subprocess, sys
os.environ['HOME'] = '/home/package'
ssh_home = '/home/package/.ssh'
if '{src}' != '{dst}':
ret = subprocess.call(['rsync', '--archive', '/tmp/src/', '{dst}'])
if ret != 0:
sys.exit(ret)
shutil.copytree('/tmp/keys', ssh_home)
for root, dirs, files in os.walk(ssh_home):
for dir in dirs:
os.chmod(os.path.join(root, dir), 0o700)
for file in files:
os.chmod(os.path.join(root, file), 0o600)
sh_command = 'eval $(ssh-agent) > /dev/null; ssh-add 2>&1; {command} {params}'
ret = subprocess.call(['sh', '-c', sh_command])
sys.exit(ret)
'''
command = command.format(
command=args.command,
params=' '.join(pass_args),
src=args.src,
dst=destination)
reflect = [(destination, 'rw')]
reflect.extend(zip(args.reflect, ['rw'] * len(args.reflect)))
ret = docker.run(tty=True,
interactive=True,
rm=True,
reflect=reflect,
volumes=[(args.keys, '/tmp/keys', 'ro'),
(args.src, '/tmp/src', 'ro')],
workdir=workdir,
image=args.image,
run_params=(['--privileged=true']),
command=['python', '-c', command],
user='package')
sys.exit(ret)
|
mit
| -2,463,403,816,468,090,400
| 25.015385
| 79
| 0.617978
| false
| 3.617112
| false
| false
| false
|
pandich/pymetrics
|
pymetrics/histogram.py
|
1
|
1143
|
import numpy as np
from metric import metric_decorated
from statistical_metric import StatisticalMetric
from pymetrics.unit.timeunit import now
time_key = 'time'
value_key = 'value'
class Histogram(StatisticalMetric):
time_series_dtype = np.dtype([
(time_key, float),
(value_key, float),
])
def __init__(self, name, dtype=time_series_dtype):
StatisticalMetric.__init__(self, name, dtype)
return
def update(self, event_time=None, value=None):
self.append((event_time or now(), value or 1))
return
def values(self):
return self._series[value_key]
def values_by_time(self, threshold):
filtered = np.where(self._series[time_key] >= threshold)
return self._series[filtered][value_key]
def __exit__(self, value_type, value, traceback):
self.update(value)
return
def histogrammed(target=None, **options):
def after(record):
record.histogram.update(record.result)
return
return metric_decorated(
target,
Histogram,
histogrammed,
after=after,
**options
)
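# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Small illustration of the Histogram defined above, assuming the pymetrics package is
# importable. The metric name 'latency' and the sample values are arbitrary, and it
# assumes StatisticalMetric stores appended rows in `self._series`, as the
# values()/values_by_time() accessors above imply.
if __name__ == "__main__":
    h = Histogram("latency")
    for sample in (1.0, 2.0, 3.0):
        h.update(value=sample)
    print(h.values())           # all recorded values
    print(h.values_by_time(0))  # values recorded at or after timestamp 0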
|
apache-2.0
| 4,475,350,069,275,106,300
| 22.326531
| 64
| 0.627297
| false
| 3.848485
| false
| false
| false
|
crazyyoung01/vv
|
vn.trader/ctaAlgo/multiCtaTemplate.py
|
1
|
7374
|
# encoding: UTF-8
'''
This file contains the strategy development template for the CTA engine; to develop a strategy you need to subclass the CtaTemplate class.
'''
from ctaBase import *
from vtConstant import *
########################################################################
class MultiCtaTemplate(object):
"""MultiCTA策略模板"""
# 策略类的名称和作者
className = 'MultiCtaTemplate'
author = EMPTY_UNICODE
# MongoDB数据库的名称,K线数据库默认为1分钟
tickDbName = TICK_DB_NAME
barDbName = MINUTE_DB_NAME
#barDbName = DATA_DB_NAME
# 策略的基本参数
name = EMPTY_UNICODE # 策略实例名称
productClass = EMPTY_STRING # 产品类型(只有IB接口需要)
currency = EMPTY_STRING # 货币(只有IB接口需要)
# 策略的基本变量,由引擎管理
inited = False # 是否进行了初始化
trading = False # 是否启动交易,由引擎管理
pos = {} # 持仓情况
vtSymbolList = [] # 交易的合约vt系统代码
# 参数列表,保存了参数的名称
paramList = ['name',
'className',
'author']
vtSymbolKey = "vtSymbol"
    # Variable list, holds the names of the variables
varList = ['inited',
'trading',
'pos']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
self.ctaEngine = ctaEngine
        # Set the strategy parameters
if setting:
d = self.__dict__
for key in self.paramList:
if key in setting:
d[key] = setting[key]
        # Load the contract parameters
if self.vtSymbolKey in setting:
self.vtSymbolList = setting[self.vtSymbolKey]
        # Initialise positions
for symbol in self.vtSymbolList:
self.pos[symbol] = 0
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
raise NotImplementedError
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
raise NotImplementedError
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
raise NotImplementedError
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
raise NotImplementedError
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
raise NotImplementedError
#----------------------------------------------------------------------
def onTrade(self, trade):
"""收到成交推送(必须由用户继承实现)"""
raise NotImplementedError
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
raise NotImplementedError
#----------------------------------------------------------------------
def onAccount(self, account):
"""收到Account推送(必须由用户继承实现)"""
raise NotImplementedError
#----------------------------------------------------------------------
def buy(self, symbol, price, volume, stop=False):
"""买开"""
return self.sendOrder(CTAORDER_BUY, symbol, price, volume, stop)
#----------------------------------------------------------------------
def sell(self, symbol, price, volume, stop=False):
"""卖平"""
return self.sendOrder(CTAORDER_SELL, symbol, price, volume, stop)
#----------------------------------------------------------------------
def short(self, symbol, price, volume, stop=False):
"""卖开"""
return self.sendOrder(CTAORDER_SHORT, symbol, price, volume, stop)
#----------------------------------------------------------------------
def cover(self, symbol, price, volume, stop=False):
"""买平"""
return self.sendOrder(CTAORDER_COVER, symbol, price, volume, stop)
#----------------------------------------------------------------------
def sendOrder(self, orderType, symbol, price, volume, stop=False):
"""发送委托"""
if self.trading:
            # If stop is True, send a local stop order
if stop:
self.writeCtaLog(u'%s1' %orderType)
vtOrderID = self.ctaEngine.sendStopOrder(symbol, orderType, price, volume, self)
else:
self.writeCtaLog(u'%s2' %orderType)
vtOrderID = self.ctaEngine.sendOrder(symbol, orderType, price, volume, self)
return vtOrderID
else:
            # When trading is stopped, return an empty string instead of an order id
return ''
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
        # If the order id is an empty string, do nothing
if not vtOrderID:
return
if STOPORDERPREFIX in vtOrderID:
self.ctaEngine.cancelStopOrder(vtOrderID)
else:
self.ctaEngine.cancelOrder(vtOrderID)
#----------------------------------------------------------------------
def insertTick(self, tick):
"""向数据库中插入tick数据"""
self.ctaEngine.insertData(self.tickDbName, tick.vtSymbol, tick)
#----------------------------------------------------------------------
def insertBar(self, bar):
"""向数据库中插入bar数据"""
self.ctaEngine.insertData(self.barDbName, bar.vtSymbol, bar)
#----------------------------------------------------------------------
def loadTick(self, symbol, days):
"""读取tick数据"""
return self.ctaEngine.loadTick(self.tickDbName, symbol, days)
#----------------------------------------------------------------------
def loadBar(self, symbol, days):
"""读取bar数据"""
return self.ctaEngine.loadBar(self.barDbName, symbol, days)
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录CTA日志"""
content = self.name + ':' + content
self.ctaEngine.writeCtaLog(content)
#----------------------------------------------------------------------
def putEvent(self):
"""发出策略状态变化事件"""
self.ctaEngine.putStrategyEvent(self.name)
#----------------------------------------------------------------------
def getEngineType(self):
"""查询当前运行的环境"""
return self.ctaEngine.engineType
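########################################################################
# --- Hedged usage sketch (editor's addition, not part of the original template) ---
# A deliberately minimal strategy showing how a subclass is expected to fill in the
# required callbacks. 'DemoStrategy' and its tick handling are illustrative only, and
# the tick attributes 'vtSymbol' and 'lastPrice' are assumed to follow the usual
# vn.trader tick layout.
class DemoStrategy(MultiCtaTemplate):
    className = 'DemoStrategy'
    author = u'example'

    def onInit(self):
        self.writeCtaLog(u'strategy initialised')

    def onStart(self):
        self.writeCtaLog(u'strategy started')

    def onStop(self):
        self.writeCtaLog(u'strategy stopped')

    def onTick(self, tick):
        # Naive example: open a one-lot long position the first time a symbol ticks.
        if self.pos.get(tick.vtSymbol, 0) == 0:
            self.buy(tick.vtSymbol, tick.lastPrice, 1)

    def onBar(self, bar):
        pass

    def onOrder(self, order):
        pass

    def onTrade(self, trade):
        pass

    def onAccount(self, account):
        pass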
|
mit
| 1,487,841,099,240,909,300
| 33.378947
| 96
| 0.402174
| false
| 3.743266
| false
| false
| false
|
TylerTemp/wordz
|
wordz/single_select.py
|
1
|
3137
|
import textwrap
import curses
import logging
from wordz import keys
logger = logging.getLogger('single_select')
class SingleSelect(object):
check = '\u2713'
def __init__(self, content, current=0, default=0, padding=0, lineno=False):
self.content = content
self.current = current
self.select = default
self.padding = padding
self.lineno = lineno
def render(self, screen):
width = screen.width - self.padding
for index, raw_content in enumerate(self.content):
color = screen.HIGHLIGHT if index == self.current else None
if self.lineno:
lineno = ' %s.' % (index + 1)
else:
lineno = ''
prefix = (' ' * self.padding) + ('%s%s ' % (self.check if color else ' ', lineno))
_, old_x = screen.getyx()
screen.write(prefix, color)
_, new_x = screen.getyx()
indent = ' ' * new_x
for each_ch in raw_content:
screen.write(each_ch, color)
if each_ch == '\n':
screen.write(indent, color)
else:
_, now_x = screen.getyx()
if now_x >= width - 1:
screen.write('\n' + indent, color)
else:
screen.write('\n')
def handler(self, k):
# if k in (keys.KEY_ENTER, keys.KEY_SPACE):
# self.select = self.current
# return
if k == keys.KEY_UP:
offset = -1
elif k == keys.KEY_DOWN:
offset = 1
else:
allowed = len(self.content)
for each in ('A', 'a', '1'):
asc_num = ord(each)
index = k - asc_num
if 0 <= index < allowed:
break
else:
return False
self.select = self.current = index
return True
max_num = len(self.content) - 1
current = self.current + offset
if current < 0:
current = max_num
elif current > max_num:
current = 0
self.current = self.select = current
return False
def get_selected(self):
return self.select
def select_item(self, index):
self.select = index
def get(self):
return self.select
def main(stdscr):
import string
import sys
import atexit
ss = SingleSelect([('中文%s' % x) * 20 for x in range(4)], padding=5, lineno=True)
# ss = SingleSelect('ABCD', [(string.ascii_letters) * 4 for x in range(4)])
screen = Screen(stdscr)
atexit.register(lambda: sys.__stdout__.write('select %s' % ss.select))
while True:
with screen.handle(ss.handler) as s:
s.clear()
ss.render(s)
if __name__ == '__main__':
from wordz.screen import Screen
from wordz.bashlog import filelogger, stdoutlogger, DEBUG
from wordz.main import redirect
stdoutlogger(None, DEBUG)
redirect()
# filelogger('/tmp/wordz.log', None, DEBUG)
curses.wrapper(main)
|
gpl-3.0
| -9,084,918,205,113,767,000
| 27.225225
| 94
| 0.514204
| false
| 4.074122
| false
| false
| false
|
jerryryle/python-lz4ex
|
tests/lz4hc_test.py
|
1
|
1734
|
from lz4ex import lz4, lz4hc
import unittest
class TestLZ4(unittest.TestCase):
def test_compress_default(self):
input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123"
input_data_size = len(input_data)
compressed = lz4hc.compress_hc(input_data, lz4hc.COMPRESSIONLEVEL_MAX)
decompressed = lz4.decompress_safe(compressed, input_data_size)
self.assertEqual(input_data, decompressed)
def test_create_and_free_stream(self):
stream = lz4hc.create_hc_stream(4*1024, lz4hc.COMPRESSIONLEVEL_MAX)
self.assertNotEqual(stream, None)
lz4hc.free_hc_stream(stream)
def test_stream_compress(self):
input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123"
block_size = int((len(input_data)/2)+1)
stream = lz4hc.create_hc_stream(block_size, lz4hc.COMPRESSIONLEVEL_MAX)
self.assertNotEqual(stream, None)
compressed_data1 = lz4hc.compress_hc_continue(stream, input_data[:block_size])
compressed_data2 = lz4hc.compress_hc_continue(stream, input_data[block_size:])
lz4hc.free_hc_stream(stream)
stream = lz4.create_decode_stream(block_size)
self.assertNotEqual(stream, None)
decompressed_data1 = lz4.decompress_safe_continue(stream, compressed_data1)
decompressed_data2 = lz4.decompress_safe_continue(stream, compressed_data2)
lz4.free_decode_stream(stream)
decompressed_data = decompressed_data1+decompressed_data2
self.assertEqual(decompressed_data, input_data)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| 8,206,075,741,504,018,000
| 39.325581
| 136
| 0.72203
| false
| 3.135624
| true
| false
| false
|
EICT/C-BAS
|
src/vendor/geni_trust/gen-certs.py
|
1
|
10956
|
#!/usr/bin/env python
import sys
import os.path
import optparse
import geniutil
import datetime
import subprocess
import uuid
CA_CERT_FILE = 'ca-cert.pem'
CA_KEY_FILE = 'ca-key.pem'
SA_CERT_FILE = 'sa-cert.pem'
SA_KEY_FILE = 'sa-key.pem'
MA_CERT_FILE = 'ma-cert.pem'
MA_KEY_FILE = 'ma-key.pem'
AM_CERT_FILE = 'am-cert.pem'
AM_KEY_FILE = 'am-key.pem'
SERVER_CERT_FILE = 'ch-cert.pem'
SERVER_KEY_FILE = 'ch-key.pem'
ADMIN_NAME = 'root'
ADMIN_EMAIL = '%s@example.net' % (ADMIN_NAME,)
ADMIN_KEY_FILE = '%s-key.pem' % (ADMIN_NAME,)
ADMIN_CERT_FILE = '%s-cert.pem' % (ADMIN_NAME,)
ADMIN_CRED_FILE = '%s-cred.xml' % (ADMIN_NAME,)
USER_NAME = 'alice'
USER_EMAIL = '%s@example.com' % (USER_NAME,)
USER_KEY_FILE = '%s-key.pem' % (USER_NAME,)
USER_CERT_FILE = '%s-cert.pem' % (USER_NAME,)
USER_CRED_FILE = '%s-cred.xml' % (USER_NAME,)
USER_URN_FILE = '%s-urn.xml' % (USER_NAME,)
BAD_USER_NAME = 'malcom'
BAD_USER_EMAIL = '%s@example.org' % (BAD_USER_NAME,)
BAD_USER_KEY_FILE = '%s-key.pem' % (BAD_USER_NAME,)
BAD_USER_CERT_FILE = '%s-cert.pem' % (BAD_USER_NAME,)
BAD_USER_CRED_FILE = '%s-cred.xml' % (BAD_USER_NAME,)
SLICE_NAME = 'pizzaslice'
SLICE_CRED_FILE = 'pizzaslice_cred.xml'
EXPEDIENT_NAME = 'expedient'
EXPEDIENT_EMAIL = '%s@felix.eu' % (EXPEDIENT_NAME,)
EXPEDIENT_KEY_FILE = '%s-key.pem' % (EXPEDIENT_NAME,)
EXPEDIENT_CERT_FILE = '%s-cert.pem' % (EXPEDIENT_NAME,)
EXPEDIENT_CRED_FILE = '%s-cred.xml' % (EXPEDIENT_NAME,)
cert_serial_number = 10
CRED_EXPIRY = datetime.datetime.utcnow() + datetime.timedelta(days=100)
def write_file(dir_path, filename, str, silent=False):
path = os.path.join(dir_path, filename)
with open(path, 'w') as f:
f.write(str)
if not silent:
print " Wrote file %s" % (path,)
def read_file(dir_path, filename):
path = os.path.join(dir_path, filename)
contents = None
with open(path, 'r') as f:
contents = f.read()
return contents
def insert_user(username, urn, cert, creds, uuid, firstName, lastName, email, clearDB=False):
import pymongo
client = pymongo.MongoClient('localhost', 27017)
database = client['ohouse']
if clearDB:
database['ma'].drop()
database['sa'].drop()
create_fields = {"MEMBER_CERTIFICATE": cert,
"MEMBER_UID" : uuid,
"MEMBER_FIRSTNAME": firstName,
"MEMBER_LASTNAME": lastName,
"MEMBER_USERNAME": username,
"MEMBER_EMAIL": email,
"MEMBER_CREDENTIALS": creds,
"MEMBER_URN": urn,
"type" : "member"}
database['ma'].insert(create_fields)
if __name__ == "__main__":
parser = optparse.OptionParser(usage = "usage: %prog directory_path")
parser.add_option("--silent", action="store_true", help="Silence output", default=False)
parser.add_option("--authority", help="Authority to use", default="")
parser.add_option("--ca_cert_path", help="Path to CA certificate files ca-cert.pem and ca-key.pem (defaults to None)", default=None)
opts, args = parser.parse_args(sys.argv)
if len(args) == 1: # no args given, index 0 is the script name
parser.print_help()
sys.exit(0)
#Simple test for xmlsec1 presence on system
try :
with open(os.devnull, "w") as null:
subprocess.call(["xmlsec1", "-h"], stdout = null, stderr = null)
except OSError:
print "xmlsec1 not found. Please install xmsec1 (http://www.aleksey.com/xmlsec/)."
sys.exit(0)
dir_path = args[1]
if not os.path.isdir(dir_path):
raise ValueError("The given path does not exist.")
#<UT>
if not opts.authority:
var = raw_input("Please enter CBAS authority/hostname (default: cbas.eict.de) ")
if not var:
authority= 'cbas.eict.de'
else:
authority = var
else:
authority = opts.authority
if not opts.ca_cert_path:
if not opts.silent:
print "Creating CA certificate"
urn = geniutil.encode_urn(authority, 'authority', 'ca')
cert_serial_number += 1
ca_c, ca_pu, ca_pr = geniutil.create_certificate(urn, is_ca=True, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, CA_CERT_FILE, ca_c, opts.silent)
write_file(dir_path, CA_KEY_FILE, ca_pr, opts.silent)
else:
if not os.path.isdir(opts.ca_cert_path):
raise ValueError("The given path for CA certificate files does not exist.")
ca_c = read_file(dir_path, CA_CERT_FILE)
ca_pr = read_file(dir_path, CA_KEY_FILE)
autority_urn, _, _ = geniutil.extract_certificate_info(ca_c)
authority = geniutil.decode_urn(autority_urn)[0]
if not opts.silent:
print "Using CA certificate from "+authority
if not opts.silent:
print "Creating SA certificate"
urn = geniutil.encode_urn(authority, 'authority', 'sa')
cert_serial_number += 1
sa_c, sa_pu, sa_pr = geniutil.create_certificate(urn, ca_pr, ca_c, is_ca=True, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, SA_CERT_FILE, sa_c, opts.silent)
write_file(dir_path, SA_KEY_FILE, sa_pr, opts.silent)
if not opts.silent:
print "Creating MA certificate"
urn = geniutil.encode_urn(authority, 'authority', 'ma')
cert_serial_number += 1
ma_c, ma_pu, ma_pr = geniutil.create_certificate(urn, ca_pr, ca_c, is_ca=True, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, MA_CERT_FILE, ma_c, opts.silent)
write_file(dir_path, MA_KEY_FILE, ma_pr, opts.silent)
if not opts.silent:
print "Creating AM certificate"
urn = geniutil.encode_urn(authority, 'authority', 'am')
cert_serial_number += 1
am_c, am_pu, am_pr = geniutil.create_certificate(urn, ca_pr, ca_c, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, AM_CERT_FILE, am_c, opts.silent)
write_file(dir_path, AM_KEY_FILE, am_pr, opts.silent)
if not opts.silent:
print "--------------------"
print "You may want to configure the above certificates & private keys in your SA/MA/AM servers."
print "Also, you may want to add the SA & MA certificates to the trusted_roots of the AM servers."
print "--------------------"
if not opts.silent:
print "Creating server certificate"
urn = geniutil.encode_urn(authority, 'authority', 'ch')
cert_serial_number += 1
server_c, _, server_pr = geniutil.create_certificate(urn, ca_pr, ca_c, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, SERVER_CERT_FILE, server_c, opts.silent)
write_file(dir_path, SERVER_KEY_FILE, server_pr, opts.silent)
if not opts.silent:
print "Creating test user cert and cred (valid, signed by MA)"
urn = geniutil.encode_urn(authority, 'user', USER_NAME)
cert_serial_number += 1
u_c,u_pu,u_pr = geniutil.create_certificate(urn, issuer_key=ma_pr, issuer_cert=ma_c, email=USER_EMAIL,
serial_number=cert_serial_number, uuidarg=str(uuid.uuid4()))
write_file(dir_path, USER_CERT_FILE, u_c, opts.silent)
write_file(dir_path, USER_KEY_FILE, u_pr, opts.silent)
u_cred = geniutil.create_credential_ex(u_c, u_c, ma_pr, ma_c, ['PROJECT_CREATE', 'PROJECT_REMOVE', 'SLICE_CREATE'], CRED_EXPIRY)
write_file(dir_path, USER_CRED_FILE, u_cred, opts.silent)
write_file(dir_path, USER_URN_FILE, urn, opts.silent)
if not opts.silent:
print "Creating bad test user cert and cred (invalid, self-signed)"
urn = geniutil.encode_urn(authority, 'user', BAD_USER_NAME)
cert_serial_number += 1
bu_c,bu_pu,bu_pr = geniutil.create_certificate(urn, email=BAD_USER_EMAIL, serial_number=cert_serial_number,
uuidarg=str(uuid.uuid4()))
write_file(dir_path, BAD_USER_CERT_FILE, bu_c, opts.silent)
write_file(dir_path, BAD_USER_KEY_FILE, bu_pr, opts.silent)
bu_cred = geniutil.create_credential(bu_c, bu_c, ma_pr, ma_c, "user", CRED_EXPIRY)
write_file(dir_path, BAD_USER_CRED_FILE, bu_cred, opts.silent)
if not opts.silent:
print "Creating admin cert and cred"
urn = geniutil.encode_urn(authority, 'user', ADMIN_NAME)
admin_uuid = str(uuid.uuid4())
cert_serial_number += 1
a_c,a_pu,a_pr = geniutil.create_certificate(urn, issuer_key=ma_pr, issuer_cert=ma_c, email=ADMIN_EMAIL,
serial_number=cert_serial_number, uuidarg=admin_uuid, life_days=10000)
write_file(dir_path, ADMIN_CERT_FILE, a_c, opts.silent)
write_file(dir_path, ADMIN_KEY_FILE, a_pr, opts.silent)
p_list = ["GLOBAL_MEMBERS_VIEW", "GLOBAL_MEMBERS_WILDCARDS", "GLOBAL_PROJECTS_MONITOR", "GLOBAL_PROJECTS_VIEW",
"GLOBAL_PROJECTS_WILDCARDS", "MEMBER_REGISTER", "SERVICE_REMOVE", "SERVICE_VIEW",
"MEMBER_REMOVE_REGISTRATION", "SERVICE_REGISTER"]
a_cred = geniutil.create_credential_ex(a_c, a_c, ma_pr, ma_c, p_list, CRED_EXPIRY)
write_file(dir_path, ADMIN_CRED_FILE, a_cred, opts.silent)
insert_user(ADMIN_NAME,urn,a_c,a_cred,admin_uuid,'System', 'Administrator','root@cbas.de', True)
urn = geniutil.encode_urn(authority, 'user', EXPEDIENT_NAME)
exp_uuid = str(uuid.uuid4())
cert_serial_number += 1
a_c,a_pu,a_pr = geniutil.create_certificate(urn, issuer_key=ma_pr, issuer_cert=ma_c, email=EXPEDIENT_EMAIL,
serial_number=cert_serial_number, uuidarg=exp_uuid, life_days=10000)
write_file(dir_path, EXPEDIENT_CERT_FILE, a_c, opts.silent)
write_file(dir_path, EXPEDIENT_KEY_FILE, a_pr, opts.silent)
p_list = ["GLOBAL_MEMBERS_VIEW", "GLOBAL_MEMBERS_WILDCARDS", "GLOBAL_PROJECTS_MONITOR", "GLOBAL_PROJECTS_VIEW",
"GLOBAL_PROJECTS_WILDCARDS", "MEMBER_REGISTER", "SERVICE_REMOVE", "SERVICE_VIEW",
"MEMBER_REMOVE_REGISTRATION", "SERVICE_REGISTER"]
a_cred = geniutil.create_credential_ex(a_c, a_c, ma_pr, ma_c, p_list, CRED_EXPIRY)
write_file(dir_path, EXPEDIENT_CRED_FILE, a_cred, opts.silent)
insert_user(EXPEDIENT_NAME,urn,a_c,a_cred,exp_uuid,'Expedient', 'User-agent','expedient@cbas.de')
if not opts.silent:
print "Creating slice credential for valid test user"
urn = geniutil.encode_urn(authority, 'slice', SLICE_NAME)
s_c = geniutil.create_slice_certificate(urn, sa_pr, sa_c, CRED_EXPIRY)
s_cred = geniutil.create_credential(u_c, s_c, sa_pr, sa_c, "slice", CRED_EXPIRY)
write_file(dir_path, SLICE_CRED_FILE, s_cred, opts.silent)
if not opts.silent:
print "--------------------"
print "You can use the user certificates and slice cert to test. In production you may acquire them from a MA and SA."
print "--------------------"
|
bsd-3-clause
| 7,574,652,154,376,849,000
| 45.621277
| 136
| 0.629518
| false
| 3.057773
| false
| false
| false
|
1032231418/python
|
lesson10/apps/books/publish/__init__.py
|
1
|
3345
|
# coding=utf8
from django.views.generic import ListView, DetailView, CreateView
from django.db.models import Q
from django.http import JsonResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render
from pure_pagination.mixins import PaginationMixin
from django.contrib.auth.mixins import LoginRequiredMixin
from django.conf import settings
from books.models import Publish, Author, Book
from books.forms import PublishForm
import json
import logging
logger = logging.getLogger('opsweb')
class PublishListView(LoginRequiredMixin, PaginationMixin, ListView):
'''
    Actions: getlist, create
'''
model = Publish
template_name = "books/publish_list.html"
context_object_name = "publish_list"
paginate_by = 5
keyword = ''
def get_queryset(self):
queryset = super(PublishListView, self).get_queryset()
self.keyword = self.request.GET.get('keyword', '').strip()
if self.keyword:
queryset = queryset.filter(Q(name__icontains=self.keyword) |
Q(address__icontains=self.keyword) |
Q(city__icontains=self.keyword))
return queryset
def get_context_data(self, **kwargs):
context = super(PublishListView, self).get_context_data(**kwargs)
context['keyword'] = self.keyword
return context
def post(self, request):
form = PublishForm(request.POST)
if form.is_valid():
form.save()
            res = {'code': 0, 'result': 'Publisher added successfully'}
else:
            # form.errors passes the validation failure details to the frontend as an object, which the frontend can render directly
res = {'code': 1, 'errmsg': form.errors}
print form.errors
return JsonResponse(res, safe=True)
class PublishDetailView(LoginRequiredMixin, DetailView):
'''
    Actions: getone, update, delete
'''
model = Publish
template_name = "books/publish_detail.html"
context_object_name = 'publish'
next_url = '/books/publishlist/'
def post(self, request, *args, **kwargs):
pk = kwargs.get('pk')
p = self.model.objects.get(pk=pk)
form = PublishForm(request.POST, instance=p)
if form.is_valid():
form.save()
res = {"code": 0, "result": "更新出版商成功", 'next_url': self.next_url}
else:
res = {"code": 1, "errmsg": form.errors, 'next_url': self.next_url}
return render(request, settings.JUMP_PAGE, res)
# return HttpResponseRedirect(reverse('books:publish_detail',args=[pk]))
def delete(self, request, *args, **kwargs):
pk = kwargs.get('pk')
        # Look up the books that belong to this publisher; if there are associated books it cannot be deleted, otherwise it can be deleted
try:
obj = self.model.objects.get(pk=pk)
if not obj.book_set.all():
self.model.objects.filter(pk=pk).delete()
res = {"code": 0, "result": "删除出版商成功"}
else:
res = {"code": 1, "errmsg": "该出版社有关联书籍,请联系管理员"}
except:
res = {"code": 1, "errmsg": "删除错误请联系管理员"}
return JsonResponse(res, safe=True)
|
apache-2.0
| -5,549,527,290,084,019,000
| 32.706522
| 80
| 0.613028
| false
| 3.284958
| false
| false
| false
|
mattilyra/gensim
|
gensim/corpora/svmlightcorpus.py
|
1
|
5903
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Corpus in SVMlight format."""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
logger = logging.getLogger(__name__)
class SvmLightCorpus(IndexedCorpus):
"""Corpus in SVMlight format.
Quoting http://svmlight.joachims.org/:
The input file contains the training examples. The first lines may contain comments and are ignored
if they start with #. Each of the following lines represents one training example
and is of the following format::
<line> .=. <target> <feature>:<value> <feature>:<value> ... <feature>:<value> # <info>
<target> .=. +1 | -1 | 0 | <float>
<feature> .=. <integer> | "qid"
<value> .=. <float>
<info> .=. <string>
The "qid" feature (used for SVMlight ranking), if present, is ignored.
Notes
-----
Although not mentioned in the specification above, SVMlight also expect its feature ids to be 1-based
(counting starts at 1). We convert features to 0-base internally by decrementing all ids when loading a SVMlight
input file, and increment them again when saving as SVMlight.
"""
def __init__(self, fname, store_labels=True):
"""
Parameters
----------
fname: str
Path to corpus.
store_labels : bool, optional
Whether to store labels (~SVM target class). They currently have no application but stored
in `self.labels` for convenience by default.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s", fname)
self.fname = fname # input file, see class doc for format
self.length = None
self.store_labels = store_labels
self.labels = []
def __iter__(self):
""" Iterate over the corpus, returning one sparse (BoW) vector at a time.
Yields
------
list of (int, float)
Document in BoW format.
"""
lineno = -1
self.labels = []
with utils.smart_open(self.fname) as fin:
for lineno, line in enumerate(fin):
doc = self.line2doc(line)
if doc is not None:
if self.store_labels:
self.labels.append(doc[1])
yield doc[0]
self.length = lineno + 1
@staticmethod
def save_corpus(fname, corpus, id2word=None, labels=False, metadata=False):
"""Save a corpus in the SVMlight format.
The SVMlight `<target>` class tag is taken from the `labels` array, or set to 0 for all documents
if `labels` is not supplied.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, float)
Corpus in BoW format.
id2word : dict of (str, str), optional
Mapping id -> word.
labels : list or False
An SVMlight `<target>` class tags or False if not present.
metadata : bool
ARGUMENT WILL BE IGNORED.
Returns
-------
list of int
Offsets for each line in file (in bytes).
"""
logger.info("converting corpus to SVMlight format: %s", fname)
offsets = []
with utils.smart_open(fname, 'wb') as fout:
for docno, doc in enumerate(corpus):
label = labels[docno] if labels else 0 # target class is 0 by default
offsets.append(fout.tell())
fout.write(utils.to_utf8(SvmLightCorpus.doc2line(doc, label)))
return offsets
def docbyoffset(self, offset):
"""Get the document stored at file position `offset`.
Parameters
----------
offset : int
Document's position.
Returns
-------
tuple of (int, float)
"""
with utils.smart_open(self.fname) as f:
f.seek(offset)
return self.line2doc(f.readline())[0]
    # TODO: it breaks if it gets None from line2doc
def line2doc(self, line):
"""Get a document from a single line in SVMlight format.
        This method is the inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.doc2line`.
Parameters
----------
line : str
Line in SVMLight format.
Returns
-------
(list of (int, float), str)
Document in BoW format and target class label.
"""
line = utils.to_unicode(line)
line = line[: line.find('#')].strip()
if not line:
return None # ignore comments and empty lines
parts = line.split()
if not parts:
raise ValueError('invalid line format in %s' % self.fname)
target, fields = parts[0], [part.rsplit(':', 1) for part in parts[1:]]
# ignore 'qid' features, convert 1-based feature ids to 0-based
doc = [(int(p1) - 1, float(p2)) for p1, p2 in fields if p1 != 'qid']
return doc, target
@staticmethod
def doc2line(doc, label=0):
"""Convert BoW representation of document in SVMlight format.
This method inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.line2doc`.
Parameters
----------
doc : list of (int, float)
Document in BoW format.
label : int, optional
Document label (if provided).
Returns
-------
str
`doc` in SVMlight format.
"""
pairs = ' '.join("%i:%s" % (termid + 1, termval) for termid, termval in doc) # +1 to convert 0-base to 1-base
return "%s %s\n" % (label, pairs)
|
lgpl-2.1
| 1,579,964,182,420,635,000
| 30.736559
| 118
| 0.569372
| false
| 4.09362
| false
| false
| false
|
annoviko/pyclustering
|
pyclustering/utils/metric.py
|
1
|
20881
|
"""!
@brief Module provides various distance metrics - abstraction of the notion of distance in a metric space.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import numpy
from enum import IntEnum
class type_metric(IntEnum):
"""!
@brief Enumeration of supported metrics in the module for distance calculation between two points.
"""
## Euclidean distance, for more information see function 'euclidean_distance'.
EUCLIDEAN = 0
## Square Euclidean distance, for more information see function 'euclidean_distance_square'.
EUCLIDEAN_SQUARE = 1
## Manhattan distance, for more information see function 'manhattan_distance'.
MANHATTAN = 2
## Chebyshev distance, for more information see function 'chebyshev_distance'.
CHEBYSHEV = 3
## Minkowski distance, for more information see function 'minkowski_distance'.
MINKOWSKI = 4
## Canberra distance, for more information see function 'canberra_distance'.
CANBERRA = 5
## Chi square distance, for more information see function 'chi_square_distance'.
CHI_SQUARE = 6
## Gower distance, for more information see function 'gower_distance'.
GOWER = 7
## User defined function for distance calculation between two points.
USER_DEFINED = 1000
class distance_metric:
"""!
    @brief Distance metric performs distance calculation between two points using an encapsulated function, for
           example, Euclidean distance or Chebyshev distance, or even a user-defined one.
@details
Example of Euclidean distance metric:
@code
metric = distance_metric(type_metric.EUCLIDEAN)
distance = metric([1.0, 2.5], [-1.2, 3.4])
@endcode
Example of Chebyshev distance metric:
@code
metric = distance_metric(type_metric.CHEBYSHEV)
distance = metric([0.0, 0.0], [2.5, 6.0])
@endcode
    In the following example an additional argument is specified (generally, 'degree' is an optional argument that is
    equal to '2' by default) that is specific to the Minkowski distance:
@code
metric = distance_metric(type_metric.MINKOWSKI, degree=4)
distance = metric([4.0, 9.2, 1.0], [3.4, 2.5, 6.2])
@endcode
    A user may define their own function for distance calculation. In this case the input is two points; for example,
    suppose you want to implement your own version of the Manhattan distance:
@code
from pyclustering.utils.metric import distance_metric, type_metric
def my_manhattan(point1, point2):
dimension = len(point1)
result = 0.0
for i in range(dimension):
result += abs(point1[i] - point2[i]) * 0.1
return result
metric = distance_metric(type_metric.USER_DEFINED, func=my_manhattan)
distance = metric([2.0, 3.0], [1.0, 3.0])
@endcode
"""
def __init__(self, metric_type, **kwargs):
"""!
@brief Creates distance metric instance for calculation distance between two points.
        @param[in] metric_type (type_metric): Type of metric that should be used for distance calculation.
        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'numpy_usage', 'func' and the
                    corresponding additional arguments for specific metric types).
<b>Keyword Args:</b><br>
- func (callable): Callable object with two arguments (point #1 and point #2) or (object #1 and object #2) in case of numpy usage.
This argument is used only if metric is 'type_metric.USER_DEFINED'.
- degree (numeric): Only for 'type_metric.MINKOWSKI' - degree of Minkowski equation.
- max_range (array_like): Only for 'type_metric.GOWER' - max range in each dimension. 'data' can be used
instead of this parameter.
- data (array_like): Only for 'type_metric.GOWER' - input data that used for 'max_range' calculation.
'max_range' can be used instead of this parameter.
- numpy_usage (bool): If True then numpy is used for calculation (by default is False).
"""
self.__type = metric_type
self.__args = kwargs
self.__func = self.__args.get('func', None)
self.__numpy = self.__args.get('numpy_usage', False)
self.__calculator = self.__create_distance_calculator()
def __call__(self, point1, point2):
"""!
@brief Calculates distance between two points.
@param[in] point1 (list): The first point.
@param[in] point2 (list): The second point.
@return (double) Distance between two points.
"""
return self.__calculator(point1, point2)
def get_type(self):
"""!
@brief Return type of distance metric that is used.
@return (type_metric) Type of distance metric.
"""
return self.__type
def get_arguments(self):
"""!
@brief Return additional arguments that are used by distance metric.
@return (dict) Additional arguments.
"""
return self.__args
def get_function(self):
"""!
@brief Return user-defined function for calculation distance metric.
@return (callable): User-defined distance metric function.
"""
return self.__func
def enable_numpy_usage(self):
"""!
        @brief Start using numpy for distance calculation.
        @details Useful for matrices to increase performance. No effect in case of type_metric.USER_DEFINED type.
"""
self.__numpy = True
if self.__type != type_metric.USER_DEFINED:
self.__calculator = self.__create_distance_calculator()
def disable_numpy_usage(self):
"""!
@brief Stop using numpy for distance calculation.
        @details Useful in case of a large number of small data portions, when the numpy call overhead exceeds the calculation itself.
No effect in case of type_metric.USER_DEFINED type.
"""
self.__numpy = False
self.__calculator = self.__create_distance_calculator()
def __create_distance_calculator(self):
"""!
@brief Creates distance metric calculator.
@return (callable) Callable object of distance metric calculator.
"""
if self.__numpy is True:
return self.__create_distance_calculator_numpy()
return self.__create_distance_calculator_basic()
def __create_distance_calculator_basic(self):
"""!
@brief Creates distance metric calculator that does not use numpy.
@return (callable) Callable object of distance metric calculator.
"""
if self.__type == type_metric.EUCLIDEAN:
return euclidean_distance
elif self.__type == type_metric.EUCLIDEAN_SQUARE:
return euclidean_distance_square
elif self.__type == type_metric.MANHATTAN:
return manhattan_distance
elif self.__type == type_metric.CHEBYSHEV:
return chebyshev_distance
elif self.__type == type_metric.MINKOWSKI:
return lambda point1, point2: minkowski_distance(point1, point2, self.__args.get('degree', 2))
elif self.__type == type_metric.CANBERRA:
return canberra_distance
elif self.__type == type_metric.CHI_SQUARE:
return chi_square_distance
elif self.__type == type_metric.GOWER:
max_range = self.__get_gower_max_range()
return lambda point1, point2: gower_distance(point1, point2, max_range)
elif self.__type == type_metric.USER_DEFINED:
return self.__func
else:
raise ValueError("Unknown type of metric: '%d'", self.__type)
def __get_gower_max_range(self):
"""!
@brief Returns max range for Gower distance using input parameters ('max_range' or 'data').
@return (numpy.array) Max range for Gower distance.
"""
max_range = self.__args.get('max_range', None)
if max_range is None:
data = self.__args.get('data', None)
if data is None:
raise ValueError("Gower distance requires 'data' or 'max_range' argument to construct metric.")
max_range = numpy.max(data, axis=0) - numpy.min(data, axis=0)
self.__args['max_range'] = max_range
return max_range
def __create_distance_calculator_numpy(self):
"""!
@brief Creates distance metric calculator that uses numpy.
@return (callable) Callable object of distance metric calculator.
"""
if self.__type == type_metric.EUCLIDEAN:
return euclidean_distance_numpy
elif self.__type == type_metric.EUCLIDEAN_SQUARE:
return euclidean_distance_square_numpy
elif self.__type == type_metric.MANHATTAN:
return manhattan_distance_numpy
elif self.__type == type_metric.CHEBYSHEV:
return chebyshev_distance_numpy
elif self.__type == type_metric.MINKOWSKI:
return lambda object1, object2: minkowski_distance_numpy(object1, object2, self.__args.get('degree', 2))
elif self.__type == type_metric.CANBERRA:
return canberra_distance_numpy
elif self.__type == type_metric.CHI_SQUARE:
return chi_square_distance_numpy
elif self.__type == type_metric.GOWER:
max_range = self.__get_gower_max_range()
return lambda object1, object2: gower_distance_numpy(object1, object2, max_range)
elif self.__type == type_metric.USER_DEFINED:
return self.__func
else:
raise ValueError("Unknown type of metric: '%d'", self.__type)
def euclidean_distance(point1, point2):
"""!
@brief Calculate Euclidean distance between two vectors.
        @details The Euclidean distance between vectors (points) a and b is calculated by the following formula:
\f[
dist(a, b) = \sqrt{ \sum_{i=0}^{N}(a_{i} - b_{i})^{2} };
\f]
Where N is a length of each vector.
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Euclidean distance between two vectors.
@see euclidean_distance_square, manhattan_distance, chebyshev_distance
"""
distance = euclidean_distance_square(point1, point2)
return distance ** 0.5
def euclidean_distance_numpy(object1, object2):
"""!
@brief Calculate Euclidean distance between two objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
@return (double) Euclidean distance between two objects.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.sqrt(numpy.sum(numpy.square(object1 - object2), axis=1))
else:
return numpy.sqrt(numpy.sum(numpy.square(object1 - object2)))
def euclidean_distance_square(point1, point2):
"""!
@brief Calculate square Euclidean distance between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}(a_{i} - b_{i})^{2};
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Square Euclidean distance between two vectors.
@see euclidean_distance, manhattan_distance, chebyshev_distance
"""
distance = 0.0
for i in range(len(point1)):
distance += (point1[i] - point2[i]) ** 2.0
return distance
def euclidean_distance_square_numpy(object1, object2):
"""!
@brief Calculate square Euclidean distance between two objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
@return (double) Square Euclidean distance between two objects.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.sum(numpy.square(object1 - object2), axis=1).T
else:
return numpy.sum(numpy.square(object1 - object2))
def manhattan_distance(point1, point2):
"""!
    @brief Calculate Manhattan distance between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}\left | a_{i} - b_{i} \right |;
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Manhattan distance between two vectors.
@see euclidean_distance_square, euclidean_distance, chebyshev_distance
"""
distance = 0.0
dimension = len(point1)
for i in range(dimension):
distance += abs(point1[i] - point2[i])
return distance
def manhattan_distance_numpy(object1, object2):
"""!
@brief Calculate Manhattan distance between two objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
@return (double) Manhattan distance between two objects.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.sum(numpy.absolute(object1 - object2), axis=1).T
else:
return numpy.sum(numpy.absolute(object1 - object2))
def chebyshev_distance(point1, point2):
"""!
    @brief Calculate Chebyshev distance (maximum metric) between two vectors.
@details Chebyshev distance is a metric defined on a vector space where the distance between two vectors is the
greatest of their differences along any coordinate dimension.
\f[
        dist(a, b) = \max_{i}\left ( \left | a_{i} - b_{i} \right | \right );
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Chebyshev distance between two vectors.
@see euclidean_distance_square, euclidean_distance, minkowski_distance
"""
distance = 0.0
dimension = len(point1)
for i in range(dimension):
distance = max(distance, abs(point1[i] - point2[i]))
return distance
def chebyshev_distance_numpy(object1, object2):
"""!
@brief Calculate Chebyshev distance between two objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
@return (double) Chebyshev distance between two objects.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.max(numpy.absolute(object1 - object2), axis=1).T
else:
return numpy.max(numpy.absolute(object1 - object2))
def minkowski_distance(point1, point2, degree=2):
"""!
@brief Calculate Minkowski distance between two vectors.
\f[
dist(a, b) = \sqrt[p]{ \sum_{i=0}^{N}\left(a_{i} - b_{i}\right)^{p} };
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
    @param[in] degree (numeric): Degree that is used for the Minkowski distance.
@return (double) Minkowski distance between two vectors.
@see euclidean_distance
"""
distance = 0.0
for i in range(len(point1)):
distance += (point1[i] - point2[i]) ** degree
return distance ** (1.0 / degree)
def minkowski_distance_numpy(object1, object2, degree=2):
"""!
@brief Calculate Minkowski distance between objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
    @param[in] degree (numeric): Degree that is used for the Minkowski distance.
    @return (double) Minkowski distance between two objects.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.power(numpy.sum(numpy.power(object1 - object2, degree), axis=1), 1/degree)
else:
return numpy.power(numpy.sum(numpy.power(object1 - object2, degree)), 1 / degree)
def canberra_distance(point1, point2):
"""!
@brief Calculate Canberra distance between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}\frac{\left | a_{i} - b_{i} \right |}{\left | a_{i} \right | + \left | b_{i} \right |};
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (float) Canberra distance between two objects.
"""
distance = 0.0
for i in range(len(point1)):
divider = abs(point1[i]) + abs(point2[i])
if divider == 0.0:
continue
distance += abs(point1[i] - point2[i]) / divider
return distance
def canberra_distance_numpy(object1, object2):
"""!
@brief Calculate Canberra distance between two objects using numpy.
@param[in] object1 (array_like): The first vector.
@param[in] object2 (array_like): The second vector.
@return (float) Canberra distance between two objects.
"""
with numpy.errstate(divide='ignore', invalid='ignore'):
result = numpy.divide(numpy.abs(object1 - object2), numpy.abs(object1) + numpy.abs(object2))
if len(result.shape) > 1:
return numpy.sum(numpy.nan_to_num(result), axis=1).T
else:
return numpy.sum(numpy.nan_to_num(result))
def chi_square_distance(point1, point2):
"""!
@brief Calculate Chi square distance between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}\frac{\left ( a_{i} - b_{i} \right )^{2}}{\left | a_{i} \right | + \left | b_{i} \right |};
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (float) Chi square distance between two objects.
"""
distance = 0.0
for i in range(len(point1)):
divider = abs(point1[i]) + abs(point2[i])
if divider != 0.0:
distance += ((point1[i] - point2[i]) ** 2.0) / divider
return distance
def chi_square_distance_numpy(object1, object2):
"""!
@brief Calculate Chi square distance between two vectors using numpy.
@param[in] object1 (array_like): The first vector.
@param[in] object2 (array_like): The second vector.
@return (float) Chi square distance between two objects.
"""
with numpy.errstate(divide='ignore', invalid='ignore'):
result = numpy.divide(numpy.power(object1 - object2, 2), numpy.abs(object1) + numpy.abs(object2))
if len(result.shape) > 1:
return numpy.sum(numpy.nan_to_num(result), axis=1).T
else:
return numpy.sum(numpy.nan_to_num(result))
def gower_distance(point1, point2, max_range):
"""!
@brief Calculate Gower distance between two vectors.
    @details Implementation is based on the paper @cite article::utils::metric::gower. Gower distance is calculated
             using the following formula:
\f[
dist\left ( a, b \right )=\frac{1}{p}\sum_{i=0}^{p}\frac{\left | a_{i} - b_{i} \right |}{R_{i}},
\f]
    where \f$R_{i}\f$ is the max range for the i-th dimension. \f$R\f$ is defined by the following formula:
\f[
R=max\left ( X \right )-min\left ( X \right )
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@param[in] max_range (array_like): Max range in each data dimension.
@return (float) Gower distance between two objects.
"""
distance = 0.0
dimensions = len(point1)
for i in range(dimensions):
if max_range[i] != 0.0:
distance += abs(point1[i] - point2[i]) / max_range[i]
return distance / dimensions
def gower_distance_numpy(point1, point2, max_range):
"""!
@brief Calculate Gower distance between two vectors using numpy.
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@param[in] max_range (array_like): Max range in each data dimension.
@return (float) Gower distance between two objects.
"""
with numpy.errstate(divide='ignore', invalid='ignore'):
result = numpy.divide(numpy.abs(point1 - point2), max_range)
if len(result.shape) > 1:
return numpy.sum(numpy.nan_to_num(result), axis=1).T / len(result[0])
else:
return numpy.sum(numpy.nan_to_num(result)) / len(point1)
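# --- Illustrative worked example (not part of the original module). ---
# Exercises the standalone distance functions defined above and shows how the
# Gower max range R = max(X) - min(X) is derived from the data, mirroring the
# logic of __get_gower_max_range().
def _distance_functions_sketch():
    a, b = [1.0, 2.0, 3.0], [4.0, 6.0, 3.0]
    print(euclidean_distance(a, b))      # 5.0
    print(manhattan_distance(a, b))      # 7.0
    print(chebyshev_distance(a, b))      # 4.0
    print(minkowski_distance(a, b, 2))   # 5.0 (same as Euclidean for degree 2)
    data = numpy.array([[0.0, 0.0], [2.0, 4.0], [1.0, 2.0]])
    max_range = numpy.max(data, axis=0) - numpy.min(data, axis=0)   # [2.0, 4.0]
    print(gower_distance(data[0], data[1], max_range))              # 1.0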
|
gpl-3.0
| -7,497,159,329,073,956,000
| 30.883465
| 142
| 0.614338
| false
| 3.813185
| false
| false
| false
|
madhusudancs/pytask
|
scripts/generate_tasks_from_csv.py
|
1
|
2862
|
#!/usr/bin/env python
#
# Copyright 2011 Authors of PyTask.
#
# This file is part of PyTask.
#
# PyTask is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyTask is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# You should have received a copy of the GNU General Public License
# along with PyTask. If not, see <http://www.gnu.org/licenses/>.
"""Module to fill database with the tasks supplied in CSV.
This module takes the directory containing the csv files as
argument and creates task for the data in each CSV file in
this directory.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>'
]
import csv
import datetime
import os
import sys
from django.contrib.auth.models import User
from pytask.taskapp.models import Task
STATIC_DATA = {
'created_by': User.objects.get(pk=1),
'creation_datetime': datetime.datetime.now()
}
def get_textbooks_from_csv(directory, file_name):
"""Return the list of the titles of tasks.
Args:
      directory: directory that contains the CSV file.
      file_name: name of the CSV file from which tasks must be fetched.
"""
file_absolute_name = os.path.join(directory, file_name)
csv_obj = csv.reader(open(file_absolute_name))
# Nifty trick to separate the file extension out and get the
# remaining part of the filename to use this as the tag for
# branches/departments
branch_name = os.extsep.join(file_name.split(os.extsep)[:-1])
textbooks = []
for line in csv_obj:
if len(line) == 2 and line[0]:
sep = ' by '
else:
sep = ''
textbooks.append({
'title': sep.join(line),
'desc': '(To be filled in by the Coordinator or the T/A.)',
            'tags_field': ', '.join(['Textbook', branch_name, line[1]]),
'pynts': 10,
'status': 'Open',
})
return textbooks
def seed_db(data):
"""Seeds the database when the data is passed as the argument
Args:
data: A dictionary containing the data to be seeded into the
task model.
"""
for task in data:
task.update(STATIC_DATA)
task_obj = Task(**task)
task_obj.save()
def main():
"""Just a wrapper function to make call the functions that perform
the action.
"""
for dir in sys.argv[1:]:
args = list(os.walk(dir))
files = args[0][2]
for file_name in files:
tasks = get_textbooks_from_csv(args[0][0], file_name)
seed_db(tasks)
if __name__ == '__main__':
main()
|
agpl-3.0
| 2,580,936,690,223,775,000
| 25.747664
| 71
| 0.643256
| false
| 3.683398
| false
| false
| false
|
UKPLab/sentence-transformers
|
sentence_transformers/losses/CosineSimilarityLoss.py
|
1
|
2213
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
"""
    CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label.
It computes the vectors u = model(input_text[0]) and v = model(input_text[1]) and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ||input_label - cos_score_transformation(cosine_sim(u,v))||_2.
    :param model: SentenceTransformer model
    :param loss_fct: Which pytorch loss function should be used to compare the cosine_similarity(u,v) with the input_label? By default, MSE: ||input_label - cosine_sim(u,v)||_2
    :param cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity. By default, the identity function is used (i.e. no change).
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, InputExample, losses
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, loss_fct = nn.MSELoss(), cos_score_transformation=nn.Identity()):
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.view(-1))
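# --- Illustrative training sketch (not part of the original module). ---
# Completes the docstring example above by actually fitting the model; the
# model name, batch size and warmup steps are placeholder values chosen for
# illustration only.
def _cosine_similarity_loss_sketch():
    from torch.utils.data import DataLoader
    from sentence_transformers import SentenceTransformer, SentencesDataset, InputExample, losses
    model = SentenceTransformer('distilbert-base-nli-mean-tokens')
    train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
                      InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)]
    train_dataset = SentencesDataset(train_examples, model)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=16)
    train_loss = losses.CosineSimilarityLoss(model=model)
    model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1, warmup_steps=10)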
|
apache-2.0
| 990,618,158,397,479,200
| 51.690476
| 177
| 0.712155
| false
| 3.875657
| false
| false
| false
|
damiendart/nfsnapi-python
|
nfsnapi.py
|
1
|
5512
|
"""Stuff to make working with the NearlyFreeSpeech.NET API easier.
>>> import nfsnapi
>>> # Replace USERNAME, API_KEY, and so on with actual values.
>>> nfsnapi.run_request("USERNAME", "API_KEY",
... "/account/ACCOUNT_NUMBER/balance")
'10.56'
>>> nfsnapi.run_request("USERNAME", "API_KEY",
... "/dns/DOMAIN/listRRs", "type=A")
(A bunch of JSON not shown.)
>>> # And so on...
This file was written by Damien Dart, <damiendart@pobox.com>. This is
free and unencumbered software released into the public domain. For more
information, please refer to the accompanying "UNLICENCE" file.
"""
__author__ = "Damien Dart, <damiendart@pobox.com>"
__license__ = "Unlicense"
__title__ = "nfsnapi"
__version__ = "0.3.0"
import json
import random
import string
import time
from hashlib import sha1
try:
from http.client import HTTPException
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
basestring = str
except ImportError:
from httplib import HTTPException
from urllib2 import urlopen, Request, HTTPError, URLError
def auth_header(username, API_key, request_path, request_body = b""):
"""Return a NearlyFreeSpeeech.NET authentication HTTP header field.
Returns a dictionary containing an authentication HTTP header field
required for NearlyFreeSpeech.NET API requests. For more information,
see <https://members.nearlyfreespeech.net/wiki/API/Introduction>.
- "username" should be a string containing the member login name of
the user making the request.
- "API_key" should be a string containing the API key associated with
the member login name; an API key can be obtained by submitting a
    secure support request to NearlyFreeSpeech.NET.
- "request_path" should be a string containing the path portion of the
requested URL. For example, if the requested URL is
<https://api.nearlyfreespeech.net/site/example/addAlias>,
"request_path" would be "/site/example/addAlias". The first
forward-slash is optional.
- "request_body" may be a bytestring containing the HTTP request
message body for HTTP POST requests, or an empty bytestring for GET
requests or if no such data is required. The data should be in the
standard "application/x-www-form-urlencoded" format.
"""
if (request_path[0] != "/"):
request_path = "/%s" % request_path
salt = "".join(random.choice(string.ascii_letters) for i in range(16))
timestamp = str(int(time.time()))
return { "X-NFSN-Authentication" : ";".join([username, timestamp, salt,
sha1(str(";".join([username, timestamp, salt, API_key, request_path,
sha1(request_body).hexdigest()])).encode("utf-8")).hexdigest()]) }
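# --- Illustrative usage sketch (not part of the original module). ---
# Shows the shape of the header produced by auth_header(); USERNAME, API_KEY
# and the request path are placeholders.
def _auth_header_sketch():
  headers = auth_header("USERNAME", "API_KEY", "/dns/example.com/listRRs", b"type=A")
  # {'X-NFSN-Authentication': 'USERNAME;<timestamp>;<salt>;<sha1 digest>'}
  print(headers)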
def run_request(username, API_key, request_path, request_body = None):
"""Run a NearlyFreeSpeech.NET API request, return a string response.
NOTE: This function does not verify the API server's certificate.
The NearlyFreeSpeech.net API documentation is unclear on whether every
successful API call returns a valid JSON-encoded associative array,
hence why any response is returned as a string. This method raises
"NFSNAPIRequestError" on errors.
- "username" should be a string containing the member login name of
the user making the request.
- "API_key" should be a string containing the API key associated with
the member login name; an API key can be obtained by submitting a
    secure support request to NearlyFreeSpeech.NET.
- "request_path" should be a string containing the path portion of the
requested URL. For example, if the requested URL is
<https://api.nearlyfreespeech.net/site/example/addAlias>,
"request_path" would be "/site/example/addAlias". The first
forward-slash is optional.
- "request_body" may be a string containing the HTTP request message
body for HTTP POST requests or "None" for HTTP GET requests. Pass
an empty string for HTTP POST requests that do not require a message
body. The data should be in the standard
"application/x-www-form-urlencoded" format.
"""
try:
if (request_path[0] != "/"):
request_path = "/%s" % request_path
if isinstance(request_body, basestring):
request_body = request_body.encode("utf-8")
return urlopen(Request("https://api.nearlyfreespeech.net" + request_path,
request_body, dict(auth_header(username, API_key, request_path,
request_body or b""), **{"User-Agent": "nfsnapi/" + __version__ +
" +https://www.robotinaponcho.net/git/?p=nfsnapi-python.git"}))).read().decode()
except HTTPException as e:
raise NFSNAPIRequestError(str(e))
except HTTPError as e:
try:
error = json.loads(e.read().decode())
raise NFSNAPIRequestError("\n".join([error["error"], error["debug"]]))
except (KeyError, ValueError):
raise NFSNAPIRequestError(str(e.reason))
except URLError as e:
raise NFSNAPIRequestError(str(e.reason))
class NFSNAPIRequestError(Exception):
"""Raised when an NearlyFreeSpeech.NET API request fails.
Every instance will have a "reason" attribute, a string with the
reason for the error. If the offending request resulted in a 4XX or
5XX HTTP response, the attribute will contain the "human-readable" and
debug error messages returned by the NearlyFreeSpeech.NET API,
separated by a new-line (for more information, see
<https://members.nearlyfreespeech.net/wiki/API/Introduction>).
"""
def __init__(self, reason):
Exception.__init__(self, reason)
self.reason = reason
|
unlicense
| -8,174,024,267,578,874,000
| 40.134328
| 88
| 0.717888
| false
| 3.742023
| false
| false
| false
|
jima80525/KidTasks
|
tasks/models.py
|
1
|
3216
|
""" Define the data models for the KidsTasks app """
import datetime
from django.db import models
class Kid(models.Model):
""" Defines the kids which have to do the tasks. """
name = models.CharField(max_length=256)
last_update_date = models.DateField(default=datetime.datetime.today)
days = [
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday'
]
def __str__(self):
return self.name
class Meta:
ordering = ['name', ]
def build_all_tasks(self):
""" Build a list of all tasks. From:
http://stackoverflow.com/questions/4720079/django-query-filter-with-\
variable-column """
tasks = []
for day in self.days:
qs = RepeatingTask.objects.filter(kid=self).filter(**{day: True})
tasks.append((day, [task for task in qs]))
return tasks
def populate_today(self):
""" Create new Tasks from Repeating tasks matching today's day of the
week."""
current_date = datetime.date.today()
day_name = datetime.datetime.now().strftime("%A").lower()
if current_date > self.last_update_date:
for rep_task in RepeatingTask.objects.filter(kid=self).filter(
**{day_name: True}):
date_task = Task(name=rep_task.name, date=current_date,
kid=self)
date_task.save()
self.last_update_date = current_date
self.save()
def update_with_new_repeating_task(self, new_task, cleaned_data):
""" Adds a new dated task to the list IF the newly created repeating
task is for today. Uses the cleaned data from the form to provide
a handy dict for day names."""
current_date = datetime.date.today()
day_name = datetime.datetime.now().strftime("%A").lower()
if cleaned_data[day_name]:
date_task = Task(name=new_task.name, date=current_date, kid=self)
date_task.save()
class Task(models.Model):
""" A Task is associated with a kid and a date. This is the actual thing
the kid has to do! """
name = models.CharField(max_length=256)
completed = models.BooleanField(default=False)
date = models.DateField(default=datetime.datetime.now)
kid = models.ForeignKey(Kid)
def __str__(self):
return "{0}:{1}-{2}".format(self.name, self.kid.name, str(self.date))
class Meta:
ordering = ['name', ]
class RepeatingTask(models.Model):
""" Defines a repeating task """
name = models.CharField(max_length=256)
kid = models.ForeignKey(Kid) # NOTE: RepeatingTasks are kid specific
monday = models.BooleanField(default=False)
tuesday = models.BooleanField(default=False)
wednesday = models.BooleanField(default=False)
thursday = models.BooleanField(default=False)
friday = models.BooleanField(default=False)
saturday = models.BooleanField(default=False)
sunday = models.BooleanField(default=False)
def __str__(self):
return "{0}:{1}".format(self.kid.name, self.name)
class Meta:
ordering = ['kid', 'name', ]
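# --- Illustrative usage sketch (not part of the original module). ---
# Requires a configured Django project with this app installed and migrated;
# the kid name and dates are placeholders.
def _populate_today_sketch():
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    kid = Kid.objects.create(name="Alex", last_update_date=yesterday)
    RepeatingTask.objects.create(name="Brush teeth", kid=kid, monday=True,
                                 tuesday=True, wednesday=True, thursday=True,
                                 friday=True, saturday=True, sunday=True)
    kid.populate_today()   # copies matching repeating tasks into dated Task rows
    print(Task.objects.filter(kid=kid))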
|
mit
| 8,298,939,762,771,274,000
| 33.956522
| 79
| 0.610386
| false
| 3.783529
| false
| false
| false
|
listyque/TACTIC-Handler
|
thlib/side/python_minifier/rename/mapper.py
|
1
|
5193
|
"""
For each node in an AST set the namespace to use for name binding and resolution
"""
import ast
from python_minifier.rename.util import is_namespace
def add_parent_to_arguments(arguments, func):
arguments.parent = func
arguments.namespace = func
for arg in arguments.args:
add_parent(arg, arguments, func)
if hasattr(arg, 'annotation') and arg.annotation is not None:
add_parent(arg.annotation, arguments, func.namespace)
if hasattr(arguments, 'kwonlyargs'):
for arg in arguments.kwonlyargs:
add_parent(arg, arguments, func)
if arg.annotation is not None:
add_parent(arg.annotation, arguments, func.namespace)
for node in arguments.kw_defaults:
if node is not None:
add_parent(node, arguments, func.namespace)
for node in arguments.defaults:
add_parent(node, arguments, func.namespace)
if arguments.vararg:
if hasattr(arguments, 'varargannotation') and arguments.varargannotation is not None:
add_parent(arguments.varargannotation, arguments, func.namespace)
elif isinstance(arguments.vararg, str):
pass
else:
add_parent(arguments.vararg, arguments, func)
if arguments.kwarg:
if hasattr(arguments, 'kwargannotation') and arguments.kwargannotation is not None:
add_parent(arguments.kwargannotation, arguments, func.namespace)
elif isinstance(arguments.kwarg, str):
pass
else:
add_parent(arguments.kwarg, arguments, func)
def add_parent_to_functiondef(functiondef):
"""
Add correct parent and namespace attributes to functiondef nodes
"""
if functiondef.args is not None:
add_parent_to_arguments(functiondef.args, func=functiondef)
for node in functiondef.body:
add_parent(node, parent=functiondef, namespace=functiondef)
for node in functiondef.decorator_list:
add_parent(node, parent=functiondef, namespace=functiondef.namespace)
if hasattr(functiondef, 'returns') and functiondef.returns is not None:
add_parent(functiondef.returns, parent=functiondef, namespace=functiondef.namespace)
def add_parent_to_classdef(classdef):
"""
Add correct parent and namespace attributes to classdef nodes
"""
for node in classdef.bases:
add_parent(node, parent=classdef, namespace=classdef.namespace)
if hasattr(classdef, 'keywords'):
for node in classdef.keywords:
add_parent(node, parent=classdef, namespace=classdef.namespace)
if hasattr(classdef, 'starargs') and classdef.starargs is not None:
add_parent(classdef.starargs, parent=classdef, namespace=classdef.namespace)
if hasattr(classdef, 'kwargs') and classdef.kwargs is not None:
add_parent(classdef.kwargs, parent=classdef, namespace=classdef.namespace)
for node in classdef.body:
add_parent(node, parent=classdef, namespace=classdef)
for node in classdef.decorator_list:
add_parent(node, parent=classdef, namespace=classdef.namespace)
def add_parent(node, parent=None, namespace=None):
"""
Add a parent attribute to child nodes
Add a namespace attribute to child nodes
:param node: The tree to add parent and namespace properties to
:type node: :class:`ast.AST`
:param parent: The parent node of this node
:type parent: :class:`ast.AST`
:param namespace: The namespace Node that this node is in
:type namespace: ast.Lambda or ast.Module or ast.FunctionDef or ast.AsyncFunctionDef or ast.ClassDef or ast.DictComp or ast.SetComp or ast.ListComp or ast.Generator
"""
node.parent = parent if parent is not None else node
node.namespace = namespace if namespace is not None else node
if is_namespace(node):
node.bindings = []
node.global_names = set()
node.nonlocal_names = set()
if isinstance(node, ast.FunctionDef) or (
hasattr(ast, 'AsyncFunctionDef') and isinstance(node, ast.AsyncFunctionDef)
):
add_parent_to_functiondef(node)
elif isinstance(node, ast.Lambda):
add_parent_to_arguments(node.args, func=node)
add_parent(node.body, parent=node, namespace=node)
elif isinstance(node, ast.ClassDef):
add_parent_to_classdef(node)
else:
for child in ast.iter_child_nodes(node):
add_parent(child, parent=node, namespace=node)
return
if isinstance(node, ast.comprehension):
add_parent(node.target, parent=node, namespace=namespace)
add_parent(node.iter, parent=node, namespace=namespace)
for if_ in node.ifs:
add_parent(if_, parent=node, namespace=namespace)
return
if isinstance(node, ast.Global):
namespace.global_names.update(node.names)
if hasattr(ast, 'Nonlocal') and isinstance(node, ast.Nonlocal):
namespace.nonlocal_names.update(node.names)
for child in ast.iter_child_nodes(node):
add_parent(child, parent=node, namespace=namespace)
def add_namespace(module):
add_parent(module)
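# --- Illustrative usage sketch (not part of the original module). ---
# Annotates a small module and checks the namespace assigned to nodes;
# assumes this module is importable as part of python_minifier.
def _add_namespace_sketch():
    tree = ast.parse("def f(x):\n    return x + 1\n")
    add_namespace(tree)
    func = tree.body[0]
    assert func.namespace is tree          # the function name is bound in the module namespace
    assert func.body[0].namespace is func  # its body resolves names in the function namespace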
|
epl-1.0
| -7,521,664,311,103,650,000
| 34.568493
| 168
| 0.677258
| false
| 4.028704
| false
| false
| false
|
rjschwei/azure-sdk-for-python
|
azure-mgmt-sql/azure/mgmt/sql/models/server_metric.py
|
1
|
2274
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ServerMetric(Model):
"""Represents server metrics.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: Name of the server usage metric.
:vartype name: str
:ivar resource_name: The name of the resource.
:vartype resource_name: str
:ivar display_name: The metric display name.
:vartype display_name: str
:ivar current_value: The current value of the metric.
:vartype current_value: float
:ivar limit: The current limit of the metric.
:vartype limit: float
:ivar unit: The units of the metric.
:vartype unit: str
:ivar next_reset_time: The next reset time for the metric (ISO8601
format).
:vartype next_reset_time: datetime
"""
_validation = {
'name': {'readonly': True},
'resource_name': {'readonly': True},
'display_name': {'readonly': True},
'current_value': {'readonly': True},
'limit': {'readonly': True},
'unit': {'readonly': True},
'next_reset_time': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'float'},
'limit': {'key': 'limit', 'type': 'float'},
'unit': {'key': 'unit', 'type': 'str'},
'next_reset_time': {'key': 'nextResetTime', 'type': 'iso-8601'},
}
def __init__(self):
self.name = None
self.resource_name = None
self.display_name = None
self.current_value = None
self.limit = None
self.unit = None
self.next_reset_time = None
|
mit
| 8,039,844,268,228,363,000
| 33.984615
| 76
| 0.5708
| false
| 4.119565
| false
| false
| false
|
elopio/snapcraft
|
tests/integration/general/test_clean_prime_step.py
|
1
|
3431
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
Contains,
DirExists,
FileExists,
Not
)
from tests import integration
class CleanPrimeStepTestCase(integration.TestCase):
def setUp(self):
super().setUp()
self.copy_project_to_cwd('independent-parts')
self.run_snapcraft('prime')
def test_clean_prime_step(self):
bindir = os.path.join(self.prime_dir, 'bin')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
output = self.run_snapcraft(
['clean', '--step=prime'], debug=False)
self.assertThat(self.prime_dir, Not(DirExists()))
self.assertThat(self.stage_dir, DirExists())
self.assertThat(self.parts_dir, DirExists())
# Assert that the priming area was removed wholesale, not a part at a
# time (since we didn't specify any parts).
self.assertThat(output, Contains("Cleaning up priming area"))
self.expectThat(output, Not(Contains('part1')))
self.expectThat(output, Not(Contains('part2')))
# Now try to prime again
self.run_snapcraft('prime')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
def test_clean_prime_step_single_part(self):
bindir = os.path.join(self.prime_dir, 'bin')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
self.run_snapcraft(['clean', 'part1', '--step=prime'])
self.assertThat(os.path.join(bindir, 'file1'), Not(FileExists()))
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
self.assertThat(self.stage_dir, DirExists())
self.assertThat(self.parts_dir, DirExists())
# Now try to prime again
self.run_snapcraft('prime')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
def test_clean_with_deprecated_strip_step(self):
bindir = os.path.join(self.prime_dir, 'bin')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
self.run_snapcraft(['clean', '--step=strip'])
self.assertThat(self.prime_dir, Not(DirExists()))
self.assertThat(self.stage_dir, DirExists())
self.assertThat(self.parts_dir, DirExists())
# Now try to prime again
self.run_snapcraft('prime')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
|
gpl-3.0
| -3,465,672,595,023,628,300
| 37.988636
| 77
| 0.660449
| false
| 3.611579
| true
| false
| false
|
fbcotter/dataset_loading
|
dataset_loading/pascal.py
|
1
|
1677
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pandas as pd
from dataset_loading import core
def img_sets():
"""
List all the image sets from Pascal VOC. Don't bother computing
this on the fly, just remember it. It's faster.
"""
return [
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train',
'tvmonitor']
def img_dict(base_dir):
d = {}
for i, cat in enumerate(img_sets()):
filename = os.path.join(base_dir, 'ImageSets', 'Main',
cat+'_trainval.txt')
df = pd.read_csv(filename, delim_whitespace=True, header=None,
names=['filename', 'true'])
df = df[df['true'] == 1]
files = df['filename'].values
for f in files:
if f in d.keys():
d[f].append(i)
else:
d[f] = [i]
return d
def load_pascal_data(data_dir, max_epochs=None, thread_count=3,
imsize=(128,128)):
"""Will use a filename queue and img_queue and load the data
"""
file_queue = core.FileQueue()
# d = img_dict(data_dir)
img_queue = core.ImageQueue(files_in_epoch=250, maxsize=1000)
threads = []
for i in range(thread_count):
thread = core.imLoader('Loader ' + str(i+1), file_queue, img_queue,
imsize, data_dir)
thread.start()
threads.append(thread)
return img_queue
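# --- Illustrative usage sketch (not part of the original module). ---
# The VOC directory is a placeholder; it must contain the standard
# 'ImageSets/Main' layout for img_dict() and the loader threads to work.
def _pascal_usage_sketch():
    voc_dir = '/path/to/VOCdevkit/VOC2012'   # hypothetical path
    class_indices = img_dict(voc_dir)        # {image filename: [category indices]}
    img_queue = load_pascal_data(voc_dir, thread_count=2, imsize=(128, 128))
    print(len(class_indices))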
|
mit
| -7,223,453,819,954,609,000
| 28.946429
| 75
| 0.543828
| false
| 3.568085
| false
| false
| false
|
DarkPhoenix6/My_Libraries
|
Python/Utils/mars.py
|
1
|
12802
|
from __future__ import division
import sys
import math
class Point2D(object):
def __init__(self, x, y):
self.x = x
self.y = y
def square_distance(self, other_point):
""" Calculates the square distance between this Point2D and another Point2D
:param other_point: The other Point2D
:return: The Square Distance
:rtype: float
"""
return (self.x - other_point.x) ** 2 + (self.y - other_point.y) ** 2
def __eq__(self, other_point):
""" Override the equals operator to compare coordinates
:param other_point: The other Point2D
:return: True if points are equal else False
:type: bool
"""
return self.x == other_point.x and self.y == other_point.y
def to_dict(self):
""" Converts point to python dict
:return: dict of x,y coordinates
:rtype: dict
"""
return {"x": self.x, "y": self.y}
def pythagoras_find_c(self):
return math.sqrt(self.x ** 2 + self.y ** 2)
def slope(self, other_point):
""" Calculates the slope between this point and another Point2D
:param other_point: The other point to find the slope with
:return: Slope as a float
"""
# TODO Find a better way to handle this error
if self.x == other_point.x:
return None
# cast to float just in case there is an integer passed in
return (self.y - other_point.y) / float(self.x - other_point.x)
def angle_deg(self, other_point):
""" Calculates the angle in degrees between this point and another Point2D
:param other_point: The other Point2D
:return: The angle in Degrees
"""
if self.x != other_point.x:
slope = other_point.slope(self)
if slope is not None:
return 180 * math.atan(slope) / math.pi
else:
# vertical line
return None
return 90 if other_point.y > self.y else -90
def pos_angle_deg(self, other_point):
angle = self.angle_deg(other_point)
return angle if angle >= 0 else angle + 180.0
@staticmethod
def intersect(point1, point2, point3, point4):
"""
        Calculating the intersecting point that will be the new node.
:param point1:
:param point2:
:param point3:
:param point4:
:return:
"""
c = (point2.y - point1.y) * (point3.x - point4.x) - (point1.x - point2.x) * (point4.y - point3.y)
if c != 0:
return Point2D(((point3.x - point4.x) * (point1.x * point2.y - point2.x * point1.y) - (point1.x - point2.x)
* (point3.x * point4.y - point4.x * point3.y)) / c,
(-(point4.y - point3.y) * (point1.x * point2.y - point2.x * point1.y) + (point2.y - point1.y)
* (point3.x * point4.y - point4.x * point3.y)) / c)
else:
return None
@staticmethod
def intersect_xy_mp(m, point1, point2, point3):
"""
        Calculating the intersecting point that will be the new node.
:param m: slope
:param point1:
:param point2:
:param point3:
:return:
"""
c = m * (point3.x - point2.x) + point2.y - point3.y
if abs(m) < 100:
if c != 0:
x_ = ((point3.x - point2.x) * (m * point1.x - point1.y + point2.y) + (point2.y - point3.y) * point2.x) \
/ c
return Point2D(x_, m * (x_ - point1.x) + point1.y)
elif point3.x != point2.x:
return Point2D(point1.x, (point1.y - point2.y) * (point3.y - point2.y) / (point3.x - point2.x) + point2.y)
return Point2D((point1.x + point2.x + point3.x) / 3, (point1.y + point2.y + point3.y) / 3)
def y_intercept(self, other):
slope = other.slope(self)
b = -1 * slope * self.x + self.y
return b
def __str__(self):
return "Point2D({},{})".format(self.x, self.y)
def __mul__(self, other):
if type(other) == type(self):
return Point2D(self.x * other.x, self.y * other.y)
else:
return Point2D(self.x * other, self.y * other)
def __rmul__(self, other):
return Point2D.__mul__(self, other)
def __add__(self, other):
if type(other) == type(self):
return Point2D(self.x + other.x, self.y + other.y)
else:
return Point2D(self.x + other, self.y + other)
def __radd__(self, other):
return Point2D.__add__(self, other)
def __sub__(self, other):
if type(other) == type(self):
return Point2D(self.x - other.x, self.y - other.y)
else:
return Point2D(self.x - other, self.y - other)
def __rsub__(self, other):
return Point2D.__sub__(other, self)
def __truediv__(self, other):
if type(other) == type(self):
return Point2D(self.x / other.x, self.y / other.y)
else:
return Point2D(self.x / other, self.y / other)
def __rtruediv__(self, other):
return Point2D.__truediv__(other, self)
@staticmethod
def find_distance(point1, point2):
""" finds the distance between points
:param point1:
:param point2:
:return:
"""
result = math.sqrt(point2.square_distance(point1))
return result
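# --- Illustrative usage sketch (not part of the original class). ---
# Exercises the basic Point2D geometry helpers defined above.
def _point2d_sketch():
    a, b = Point2D(0, 0), Point2D(3, 4)
    print(Point2D.find_distance(a, b))   # 5.0
    print(a.slope(b))                    # 1.333...
    print(a.angle_deg(b))                # ~53.13 degrees
    crossing = Point2D.intersect(Point2D(0, 0), Point2D(4, 4),
                                 Point2D(0, 4), Point2D(4, 0))
    print(crossing)                      # Point2D(2.0,2.0)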
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
class Mars(object):
def __init__(self, x_arr: list, y_arr: list):
self.x_arr = x_arr
self.y_arr = y_arr
self.surface = []
self.flat_spots = []
self.gravity = 3.711
self.target = Point2D(0, 0)
for i in range(len(self.x_arr)):
self.surface.append(Point2D(self.x_arr[i], self.y_arr[i]))
if (i + 1) != len(self.x_arr):
temp = self.surface[-1]
temp2 = Point2D(self.x_arr[i + 1], self.y_arr[i + 1])
if (temp2.x - temp.x) >= 1000 and temp2.y == temp.y:
self.flat_spots = [temp, temp2]
self.target = Point2D(temp2.x - temp.x, temp.y)
slope = temp.slope(temp2)
b = temp.y_intercept(temp2)
if slope is not None:
for j in range(1, self.x_arr[i + 1] - self.x_arr[i]):
self.surface.append(Point2D(j, ((j * slope) + b)))
else:
pass
class MarsLander(object):
def __init__(self, mars: Mars, x, y, h_velocity, v_velocity, fuel, rotation, power):
self.mars = mars
self.current_position = Point2D(x, y)
self.current_velocity = Point2D(h_velocity, v_velocity)
self.velocity_angle = math.atan2(self.current_velocity.y, self.current_velocity.x)
self.fuel = fuel
self.rotation = rotation
self.power = power
def calculate_trajectory(self, target: Point2D):
temp = self.current_position + (self.current_velocity * 3)
print("Debug messages... Calculating Trajectory", temp, target, file=sys.stderr)
if temp.x - target.x != 0:
trajectory = temp.angle_deg(target)
# TODO
if temp.y < target.y:
return int(trajectory) * -1
else:
return int(trajectory) * -1
elif self.current_position.x - target.x != 0:
trajectory = temp.angle_deg(target)
# TODO
if temp.y < target.y:
return int(trajectory) * -1
else:
return int(trajectory) * -1
else:
return 0
def angle_of_reach(self, distance):
return (1 / 2) * math.asin(self.mars.gravity * distance / (self.current_velocity.pythagoras_find_c() ** 2))
def distance_traveled(self):
v = self.current_velocity.pythagoras_find_c()
theta = self.velocity_angle
g = self.mars.gravity
result1 = v * math.cos(theta) / g
result2 = (v * math.sin(theta)) + math.sqrt(((v * math.sin(theta)) ** 2) + 2 * g * self.current_position.y)
return result1 * result2
def time_of_flight(self):
v = self.current_velocity.pythagoras_find_c()
d = self.distance_traveled()
        theta = self.velocity_angle
        # NOTE: the original expression was left unfinished; this completion
        # assumes time of flight = horizontal distance / horizontal speed.
        result = v * math.cos(theta)
        return d / result
def landing_sequence(self):
print("Debug messages... Initiaing Landing Sequence", file=sys.stderr)
if (self.mars.flat_spots[0].x + 10) <= self.current_position.x <= (self.mars.flat_spots[1].x - 10):
if -20 < self.current_velocity.x < 20:
print("Debug messages... 1", file=sys.stderr)
if self.current_velocity.y <= -30:
inst = "0 4"
else:
inst = "0 2"
else:
print("Debug messages... 2", file=sys.stderr)
inst = self.cancel_x_velocity()
else:
if -20 < self.current_velocity.x < 20:
print("Debug messages... 3", file=sys.stderr)
if self.mars.target.y < self.current_position.y:
trajectory = int(self.calculate_trajectory(self.mars.target))
if self.current_velocity.y <= -30:
power2 = 4
else:
power2 = 3
inst = str(trajectory) + " " + str(power2)
else:
trajectory = int(self.calculate_trajectory(Point2D(self.mars.target.x, self.mars.target.y + 200)))
power2 = 4
inst = str(trajectory) + " " + str(power2)
else:
print("Debug messages... 4", file=sys.stderr)
inst = self.cancel_x_velocity()
return inst
def cancel_x_velocity(self):
if -15 > self.current_velocity.x:
if -33 > self.current_velocity.x:
trajectory = str(-62)
power2 = str(4)
elif -15 > self.current_velocity.x:
if self.current_velocity.y <= -30:
power2 = str(4)
trajectory = str(-30)
else:
power2 = str(4)
trajectory = str(-45)
else:
if self.current_velocity.y <= -30:
trajectory = str(-45)
else:
trajectory = str(-73)
power2 = str(4)
else:
if 33 < self.current_velocity.x:
trajectory = str(62)
power2 = str(4)
if self.current_velocity.y <= -30:
power2 = str(4)
trajectory = str(30)
else:
power2 = str(4)
trajectory = str(45)
else:
if self.current_velocity.y <= -30:
trajectory = str(45)
else:
trajectory = str(73)
power2 = str(4)
inst = trajectory + " " + power2
return inst
surface_n = int(input()) # the number of points used to draw the surface of Mars.
x = []
y = []
for k in range(surface_n):
# land_x: X coordinate of a surface point. (0 to 6999)
# land_y: Y coordinate of a surface point. By linking all the points together in a sequential fashion, you form the surface of Mars.
land_x, land_y = [int(j) for j in input().split()]
x.append(land_x)
y.append(land_y)
# game loop
mars = Mars(x, y)
while True:
# h_speed: the horizontal speed (in m/s), can be negative.
# v_speed: the vertical speed (in m/s), can be negative.
# fuel: the quantity of remaining fuel in liters.
# rotate: the rotation angle in degrees (-90 to 90).
# power: the thrust power (0 to 4).
x1, y1, h_speed, v_speed, fuel, rotate, power = [int(i) for i in input().split()]
lander = MarsLander(mars, x1, y1, h_speed, v_speed, fuel, rotate, power)
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
if lander.mars.flat_spots[0].x > lander.mars.flat_spots[1].x:
lander.mars.flat_spots[0], lander.mars.flat_spots[1] = lander.mars.flat_spots[1], lander.mars.flat_spots[0]
if ((lander.mars.flat_spots[0].x - 1000) <= lander.current_position.x <= (
lander.mars.flat_spots[1].x + 1000)) and lander.current_position.y > lander.mars.target.y:
comm = lander.landing_sequence()
print(comm)
# rotate power. rotate is the desired rotation angle. power is the desired thrust power.
else:
print(str(lander.calculate_trajectory(lander.mars.target)) + " 4")
|
gpl-3.0
| 4,510,197,727,863,930,400
| 36.542522
| 136
| 0.529605
| false
| 3.518966
| false
| false
| false
|
josanvel/BazarPapeleriaLulita
|
CodigoBazarLulita/pyBotonesReportes.py
|
1
|
1813
|
'''
Created on 15/03/2015
@author: josanvel
'''
from PyQt4 import QtCore, QtGui
from BotonesReportes import Ui_BotonesReportes
from pyReporteGanancia import MyformReporteGanancias
from pyReporteProducto import MyformReporteProductos
class MyformBotonesReportes(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.uiBotonesReportes= Ui_BotonesReportes()
self.uiBotonesReportes.setupUi(self)
self.center()
self.connect(self.uiBotonesReportes.btnRegresarReportes, QtCore.SIGNAL("clicked()"), self.regresarReportes)
self.connect(self.uiBotonesReportes.btnReporteGanancias, QtCore.SIGNAL("clicked()"), self.entrarReporteGanancias)
self.connect(self.uiBotonesReportes.btnReporteProductos, QtCore.SIGNAL("clicked()"), self.entrarReporteProductos)
def entrarReporteGanancias(self):
self.hide()
self.reporteGanancias = MyformReporteGanancias()
self.reporteGanancias.regresarVentanaR(self)
self.reporteGanancias.show()
def entrarReporteProductos(self):
self.hide()
self.reporteProductos = MyformReporteProductos()
self.reporteProductos.regresarVentanaR(self)
self.reporteProductos.show()
def regresarVentanaR(self,ventanaAtras):
self.ventana = ventanaAtras
def regresarReportes(self):
self.hide()
self.ventana.show()
def center(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
|
gpl-2.0
| 846,794,267,647,496,200
| 35.28
| 129
| 0.63155
| false
| 3.5
| false
| false
| false
|
jpacg/su-binary
|
jni/selinux/python/sepolicy/sepolicy/manpage.py
|
1
|
41002
|
#! /usr/bin/python -Es
# Copyright (C) 2012-2013 Red Hat
# AUTHOR: Dan Walsh <dwalsh@redhat.com>
# AUTHOR: Miroslav Grepl <mgrepl@redhat.com>
# see file 'COPYING' for use and warranty information
#
# semanage is a tool for managing SELinux configuration files
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
#
__all__ = ['ManPage', 'HTMLManPages', 'manpage_domains', 'manpage_roles', 'gen_domains']
import string
import selinux
import sepolicy
import os
import time
typealias_types = {
"antivirus_t":("amavis_t", "clamd_t", "clamscan_t", "freshclam_t"),
"cluster_t":("rgmanager_t", "corosync_t", "aisexec_t", "pacemaker_t"),
"svirt_t":("qemu_t"),
"httpd_t":("phpfpm_t"),
}
equiv_dict = {"smbd": ["samba"], "httpd": ["apache"], "virtd": ["virt", "libvirt"], "named": ["bind"], "fsdaemon": ["smartmon"], "mdadm": ["raid"]}
equiv_dirs = ["/var"]
modules_dict = None
def gen_modules_dict(path="/usr/share/selinux/devel/policy.xml"):
global modules_dict
if modules_dict:
return modules_dict
import xml.etree.ElementTree
modules_dict = {}
try:
tree = xml.etree.ElementTree.fromstring(sepolicy.policy_xml(path))
for l in tree.findall("layer"):
for m in l.findall("module"):
name = m.get("name")
if name == "user" or name == "unconfined":
continue
if name == "unprivuser":
name = "user"
if name == "unconfineduser":
name = "unconfined"
for b in m.findall("summary"):
modules_dict[name] = b.text
except IOError:
pass
return modules_dict
users = None
users_range = None
def get_all_users_info():
global users
global users_range
if users and users_range:
return users, users_range
users = []
users_range = {}
allusers = []
allusers_info = sepolicy.info(sepolicy.USER)
for d in allusers_info:
allusers.append(d['name'])
users_range[d['name'].split("_")[0]] = d['range']
for u in allusers:
if u not in ["system_u", "root", "unconfined_u"]:
users.append(u.replace("_u", ""))
users.sort()
return users, users_range
all_entrypoints = None
def get_entrypoints():
global all_entrypoints
if not all_entrypoints:
all_entrypoints = next(sepolicy.info(sepolicy.ATTRIBUTE, "entry_type"))["types"]
return all_entrypoints
domains = None
def gen_domains():
global domains
if domains:
return domains
domains = []
for d in sepolicy.get_all_domains():
found = False
domain = d[:-2]
# if domain + "_exec_t" not in get_entrypoints():
# continue
if domain in domains:
continue
domains.append(domain)
for role in sepolicy.get_all_roles():
if role[:-2] in domains or role == "system_r":
continue
domains.append(role[:-2])
domains.sort()
return domains
types = None
def _gen_types():
global types
if types:
return types
all_types = sepolicy.info(sepolicy.TYPE)
types = {}
for rec in all_types:
try:
types[rec["name"]] = rec["attributes"]
except:
types[rec["name"]] = []
return types
def prettyprint(f, trim):
return " ".join(f[:-len(trim)].split("_"))
# for HTML man pages
manpage_domains = []
manpage_roles = []
fedora_releases = ["Fedora17", "Fedora18"]
rhel_releases = ["RHEL6", "RHEL7"]
def get_alphabet_manpages(manpage_list):
alphabet_manpages = dict.fromkeys(string.ascii_letters, [])
for i in string.ascii_letters:
temp = []
for j in manpage_list:
if j.split("/")[-1][0] == i:
temp.append(j.split("/")[-1])
alphabet_manpages[i] = temp
return alphabet_manpages
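# --- Illustrative usage sketch (not part of the original module). ---
# Buckets man page paths by the first letter of their file name, which is
# what the HTML index generation below relies on; the paths are placeholders.
def _alphabet_manpages_sketch():
    pages = ["/tmp/httpd_selinux.8", "/tmp/named_selinux.8", "/tmp/nscd_selinux.8"]
    buckets = get_alphabet_manpages(pages)
    print(buckets['h'])   # ['httpd_selinux.8']
    print(buckets['n'])   # ['named_selinux.8', 'nscd_selinux.8']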
def convert_manpage_to_html(html_manpage, manpage):
try:
from commands import getstatusoutput
except ImportError:
from subprocess import getstatusoutput
rc, output = getstatusoutput("/usr/bin/groff -man -Thtml %s 2>/dev/null" % manpage)
if rc == 0:
print(html_manpage, "has been created")
fd = open(html_manpage, 'w')
fd.write(output)
fd.close()
class HTMLManPages:
"""
    Generate HTML man pages for the given SELinux domains
"""
def __init__(self, manpage_roles, manpage_domains, path, os_version):
self.manpage_roles = get_alphabet_manpages(manpage_roles)
self.manpage_domains = get_alphabet_manpages(manpage_domains)
self.os_version = os_version
self.old_path = path + "/"
self.new_path = self.old_path + self.os_version + "/"
        if self.os_version in fedora_releases or self.os_version in rhel_releases:
self.__gen_html_manpages()
else:
print("SELinux HTML man pages can not be generated for this %s" % os_version)
exit(1)
def __gen_html_manpages(self):
self._write_html_manpage()
self._gen_index()
self._gen_body()
self._gen_css()
def _write_html_manpage(self):
if not os.path.isdir(self.new_path):
os.mkdir(self.new_path)
for domain in self.manpage_domains.values():
if len(domain):
for d in domain:
convert_manpage_to_html((self.new_path + d.rsplit("_selinux", 1)[0] + ".html"), self.old_path + d)
for role in self.manpage_roles.values():
if len(role):
for r in role:
convert_manpage_to_html((self.new_path + r.rsplit("_selinux", 1)[0] + ".html"), self.old_path + r)
def _gen_index(self):
index = self.old_path + "index.html"
fd = open(index, 'w')
fd.write("""
<html>
<head>
<link rel=stylesheet type="text/css" href="style.css" title="style">
<title>SELinux man pages online</title>
</head>
<body>
<h1>SELinux man pages</h1>
<br></br>
<h2>Fedora or Red Hat Enterprise Linux Man Pages.</h2>
<br></br>
<hr>
<h3>Fedora</h3>
<table><tr>
<td valign="middle">
</td>
</tr></table>
<pre>
""")
for f in fedora_releases:
fd.write("""
<a href=%s/%s.html>%s</a> - SELinux man pages for %s """ % (f, f, f, f))
fd.write("""
</pre>
<hr>
<h3>RHEL</h3>
<table><tr>
<td valign="middle">
</td>
</tr></table>
<pre>
""")
for r in rhel_releases:
fd.write("""
<a href=%s/%s.html>%s</a> - SELinux man pages for %s """ % (r, r, r, r))
fd.write("""
</pre>
""")
fd.close()
print("%s has been created") % index
def _gen_body(self):
html = self.new_path + self.os_version + ".html"
fd = open(html, 'w')
fd.write("""
<html>
<head>
<link rel=stylesheet type="text/css" href="../style.css" title="style">
<title>Linux man-pages online for Fedora18</title>
</head>
<body>
<h1>SELinux man pages for Fedora18</h1>
<hr>
<table><tr>
<td valign="middle">
<h3>SELinux roles</h3>
""")
for letter in self.manpage_roles:
if len(self.manpage_roles[letter]):
fd.write("""
<a href=#%s_role>%s</a>"""
% (letter, letter))
fd.write("""
</td>
</tr></table>
<pre>
""")
rolename_body = ""
for letter in self.manpage_roles:
if len(self.manpage_roles[letter]):
rolename_body += "<p>"
for r in self.manpage_roles[letter]:
rolename = r.rsplit("_selinux", 1)[0]
rolename_body += "<a name=%s_role></a><a href=%s.html>%s_selinux(8)</a> - Security Enhanced Linux Policy for the %s SELinux user\n" % (letter, rolename, rolename, rolename)
fd.write("""%s
</pre>
<hr>
<table><tr>
<td valign="middle">
<h3>SELinux domains</h3>"""
% rolename_body)
for letter in self.manpage_domains:
if len(self.manpage_domains[letter]):
fd.write("""
<a href=#%s_domain>%s</a>
""" % (letter, letter))
fd.write("""
</td>
</tr></table>
<pre>
""")
domainname_body = ""
for letter in self.manpage_domains:
if len(self.manpage_domains[letter]):
domainname_body += "<p>"
for r in self.manpage_domains[letter]:
domainname = r.rsplit("_selinux", 1)[0]
domainname_body += "<a name=%s_domain></a><a href=%s.html>%s_selinux(8)</a> - Security Enhanced Linux Policy for the %s SELinux processes\n" % (letter, domainname, domainname, domainname)
fd.write("""%s
</pre>
</body>
</html>
""" % domainname_body)
fd.close()
print("%s has been created") % html
def _gen_css(self):
style_css = self.old_path + "style.css"
fd = open(style_css, 'w')
fd.write("""
html, body {
background-color: #fcfcfc;
font-family: arial, sans-serif;
font-size: 110%;
color: #333;
}
h1, h2, h3, h4, h5, h5 {
color: #2d7c0b;
font-family: arial, sans-serif;
margin-top: 25px;
}
a {
color: #336699;
text-decoration: none;
}
a:visited {
color: #4488bb;
}
a:hover, a:focus, a:active {
color: #07488A;
text-decoration: none;
}
a.func {
color: red;
text-decoration: none;
}
a.file {
color: red;
text-decoration: none;
}
pre.code {
background-color: #f4f0f4;
// font-family: monospace, courier;
font-size: 110%;
margin-left: 0px;
margin-right: 60px;
padding-top: 5px;
padding-bottom: 5px;
padding-left: 8px;
padding-right: 8px;
border: 1px solid #AADDAA;
}
.url {
font-family: serif;
font-style: italic;
color: #440064;
}
""")
fd.close()
print("%s has been created") % style_css
class ManPage:
"""
    Generate a man page for an SELinux domain in the specified path
"""
modules_dict = None
enabled_str = ["Disabled", "Enabled"]
def __init__(self, domainname, path="/tmp", root="/", source_files=False, html=False):
self.html = html
self.source_files = source_files
self.root = root
self.portrecs = sepolicy.gen_port_dict()[0]
self.domains = gen_domains()
self.all_domains = sepolicy.get_all_domains()
self.all_attributes = sepolicy.get_all_attributes()
self.all_bools = sepolicy.get_all_bools()
self.all_port_types = sepolicy.get_all_port_types()
self.all_roles = sepolicy.get_all_roles()
self.all_users = get_all_users_info()[0]
self.all_users_range = get_all_users_info()[1]
self.all_file_types = sepolicy.get_all_file_types()
self.role_allows = sepolicy.get_all_role_allows()
self.types = _gen_types()
if self.source_files:
self.fcpath = self.root + "file_contexts"
else:
self.fcpath = self.root + selinux.selinux_file_context_path()
self.fcdict = sepolicy.get_fcdict(self.fcpath)
if not os.path.exists(path):
os.makedirs(path)
self.path = path
if self.source_files:
self.xmlpath = self.root + "policy.xml"
else:
self.xmlpath = self.root + "/usr/share/selinux/devel/policy.xml"
self.booleans_dict = sepolicy.gen_bool_dict(self.xmlpath)
self.domainname, self.short_name = sepolicy.gen_short_name(domainname)
self.type = self.domainname + "_t"
self._gen_bools()
self.man_page_path = "%s/%s_selinux.8" % (path, self.domainname)
self.fd = open(self.man_page_path, 'w')
if self.domainname + "_r" in self.all_roles:
self.__gen_user_man_page()
if self.html:
manpage_roles.append(self.man_page_path)
else:
if self.html:
manpage_domains.append(self.man_page_path)
self.__gen_man_page()
self.fd.close()
for k in equiv_dict.keys():
if k == self.domainname:
for alias in equiv_dict[k]:
self.__gen_man_page_link(alias)
def _gen_bools(self):
self.bools = []
self.domainbools = []
types = [self.type]
if self.domainname in equiv_dict:
for t in equiv_dict[self.domainname]:
if t + "_t" in self.all_domains:
types.append(t + "_t")
for t in types:
domainbools, bools = sepolicy.get_bools(t)
self.bools += bools
self.domainbools += domainbools
self.bools.sort()
self.domainbools.sort()
def get_man_page_path(self):
return self.man_page_path
def __gen_user_man_page(self):
self.role = self.domainname + "_r"
if not self.modules_dict:
self.modules_dict = gen_modules_dict(self.xmlpath)
try:
self.desc = self.modules_dict[self.domainname]
except:
self.desc = "%s user role" % self.domainname
if self.domainname in self.all_users:
self.attributes = next(sepolicy.info(sepolicy.TYPE, (self.type)))["attributes"]
self._user_header()
self._user_attribute()
self._can_sudo()
self._xwindows_login()
# until a new policy build with login_userdomain attribute
#self.terminal_login()
self._network()
self._booleans()
self._home_exec()
self._transitions()
else:
self._role_header()
self._booleans()
self._port_types()
self._mcs_types()
self._writes()
self._footer()
def __gen_man_page_link(self, alias):
path = "%s/%s_selinux.8" % (self.path, alias)
self.fd = open("%s/%s_selinux.8" % (self.path, alias), 'w')
self.fd.write(".so man8/%s_selinux.8" % self.domainname)
self.fd.close()
print(path)
def __gen_man_page(self):
self.anon_list = []
self.attributes = {}
self.ptypes = []
self._get_ptypes()
for domain_type in self.ptypes:
try:
if typealias_types[domain_type]:
fd = self.fd
man_page_path = self.man_page_path
for t in typealias_types[domain_type]:
self._typealias_gen_man(t)
self.fd = fd
self.man_page_path = man_page_path
except KeyError:
continue
self.attributes[domain_type] = next(sepolicy.info(sepolicy.TYPE, ("%s") % domain_type))["attributes"]
self._header()
self._entrypoints()
self._process_types()
self._mcs_types()
self._booleans()
self._nsswitch_domain()
self._port_types()
self._writes()
self._file_context()
self._public_content()
self._footer()
def _get_ptypes(self):
for f in self.all_domains:
if f.startswith(self.short_name) or f.startswith(self.domainname):
self.ptypes.append(f)
def _typealias_gen_man(self, t):
self.man_page_path = "%s/%s_selinux.8" % (self.path, t[:-2])
self.ports = []
self.booltext = ""
self.fd = open(self.man_page_path, 'w')
self._typealias(t[:-2])
self._footer()
self.fd.close()
def _typealias(self,typealias):
self.fd.write('.TH "%(typealias)s_selinux" "8" "%(date)s" "%(typealias)s" "SELinux Policy %(typealias)s"'
% {'typealias':typealias, 'date': time.strftime("%y-%m-%d")})
self.fd.write(r"""
.SH "NAME"
%(typealias)s_selinux \- Security Enhanced Linux Policy for the %(typealias)s processes
.SH "DESCRIPTION"
%(typealias)s_t SELinux domain type is now associated with %(domainname)s domain type (%(domainname)s_t).
""" % {'typealias':typealias, 'domainname':self.domainname})
self.fd.write(r"""
Please see
.B %(domainname)s_selinux
man page for more details.
""" % {'domainname':self.domainname})
def _header(self):
self.fd.write('.TH "%(domainname)s_selinux" "8" "%(date)s" "%(domainname)s" "SELinux Policy %(domainname)s"'
% {'domainname': self.domainname, 'date': time.strftime("%y-%m-%d")})
self.fd.write(r"""
.SH "NAME"
%(domainname)s_selinux \- Security Enhanced Linux Policy for the %(domainname)s processes
.SH "DESCRIPTION"
Security-Enhanced Linux secures the %(domainname)s processes via flexible mandatory access control.
The %(domainname)s processes execute with the %(domainname)s_t SELinux type. You can check if you have these processes running by executing the \fBps\fP command with the \fB\-Z\fP qualifier.
For example:
.B ps -eZ | grep %(domainname)s_t
""" % {'domainname': self.domainname})
def _format_boolean_desc(self, b):
desc = self.booleans_dict[b][2][0].lower() + self.booleans_dict[b][2][1:]
if desc[-1] == ".":
desc = desc[:-1]
return desc
def _gen_bool_text(self):
booltext = ""
for b, enabled in self.domainbools + self.bools:
if b.endswith("anon_write") and b not in self.anon_list:
self.anon_list.append(b)
else:
if b not in self.booleans_dict:
continue
booltext += """
.PP
If you want to %s, you must turn on the %s boolean. %s by default.
.EX
.B setsebool -P %s 1
.EE
""" % (self._format_boolean_desc(b), b, self.enabled_str[enabled], b)
return booltext
def _booleans(self):
self.booltext = self._gen_bool_text()
if self.booltext != "":
self.fd.write("""
.SH BOOLEANS
SELinux policy is customizable based on least access required. %s policy is extremely flexible and has several booleans that allow you to manipulate the policy and run %s with the tightest access possible.
""" % (self.domainname, self.domainname))
self.fd.write(self.booltext)
def _nsswitch_domain(self):
nsswitch_types = []
nsswitch_booleans = ['authlogin_nsswitch_use_ldap', 'kerberos_enabled']
nsswitchbooltext = ""
for k in self.attributes.keys():
if "nsswitch_domain" in self.attributes[k]:
nsswitch_types.append(k)
if len(nsswitch_types):
self.fd.write("""
.SH NSSWITCH DOMAIN
""")
for b in nsswitch_booleans:
nsswitchbooltext += """
.PP
If you want to %s for the %s, you must turn on the %s boolean.
.EX
.B setsebool -P %s 1
.EE
""" % (self._format_boolean_desc(b), (", ".join(nsswitch_types)), b, b)
self.fd.write(nsswitchbooltext)
def _process_types(self):
if len(self.ptypes) == 0:
return
self.fd.write(r"""
.SH PROCESS TYPES
SELinux defines process types (domains) for each process running on the system
.PP
You can see the context of a process using the \fB\-Z\fP option to \fBps\fP
.PP
Policy governs the access confined processes have to files.
SELinux %(domainname)s policy is very flexible allowing users to setup their %(domainname)s processes in as secure a method as possible.
.PP
The following process types are defined for %(domainname)s:
""" % {'domainname': self.domainname})
self.fd.write("""
.EX
.B %s
.EE""" % ", ".join(self.ptypes))
self.fd.write("""
.PP
Note:
.B semanage permissive -a %(domainname)s_t
can be used to make the process type %(domainname)s_t permissive. SELinux does not deny access to permissive process types, but the AVC (SELinux denials) messages are still generated.
""" % {'domainname': self.domainname})
def _port_types(self):
self.ports = []
for f in self.all_port_types:
if f.startswith(self.short_name) or f.startswith(self.domainname):
self.ports.append(f)
if len(self.ports) == 0:
return
self.fd.write("""
.SH PORT TYPES
SELinux defines port types to represent TCP and UDP ports.
.PP
You can see the types associated with a port by using the following command:
.B semanage port -l
.PP
Policy governs the access confined processes have to these ports.
SELinux %(domainname)s policy is very flexible allowing users to setup their %(domainname)s processes in as secure a method as possible.
.PP
The following port types are defined for %(domainname)s:""" % {'domainname': self.domainname})
for p in self.ports:
self.fd.write("""
.EX
.TP 5
.B %s
.TP 10
.EE
""" % p)
once = True
for prot in ("tcp", "udp"):
if (p, prot) in self.portrecs:
if once:
self.fd.write("""
Default Defined Ports:""")
once = False
self.fd.write(r"""
%s %s
.EE""" % (prot, ",".join(self.portrecs[(p, prot)])))
def _file_context(self):
flist = []
mpaths = []
for f in self.all_file_types:
if f.startswith(self.domainname):
flist.append(f)
if f in self.fcdict:
mpaths = mpaths + self.fcdict[f]["regex"]
if len(mpaths) == 0:
return
mpaths.sort()
mdirs = {}
for mp in mpaths:
found = False
for md in mdirs:
if mp.startswith(md):
mdirs[md].append(mp)
found = True
break
if not found:
for e in equiv_dirs:
if mp.startswith(e) and mp.endswith('(/.*)?'):
mdirs[mp[:-6]] = []
break
equiv = []
for m in mdirs:
if len(mdirs[m]) > 0:
equiv.append(m)
self.fd.write(r"""
.SH FILE CONTEXTS
SELinux requires files to have an extended attribute to define the file type.
.PP
You can see the context of a file using the \fB\-Z\fP option to \fBls\fP
.PP
Policy governs the access confined processes have to these files.
SELinux %(domainname)s policy is very flexible allowing users to setup their %(domainname)s processes in as secure a method as possible.
.PP
""" % {'domainname': self.domainname})
if len(equiv) > 0:
self.fd.write(r"""
.PP
.B EQUIVALENCE DIRECTORIES
""")
for e in equiv:
self.fd.write(r"""
.PP
%(domainname)s policy stores data with multiple different file context types under the %(equiv)s directory. If you would like to store the data in a different directory you can use the semanage command to create an equivalence mapping. If you wanted to store this data under the /srv directory you would execute the following command:
.PP
.B semanage fcontext -a -e %(equiv)s /srv/%(alt)s
.br
.B restorecon -R -v /srv/%(alt)s
.PP
""" % {'domainname': self.domainname, 'equiv': e, 'alt': e.split('/')[-1]})
self.fd.write(r"""
.PP
.B STANDARD FILE CONTEXT
SELinux defines the file context types for %(domainname)s. If you want to
store files with these types at a different path, you need to execute the semanage command to specify alternate labeling and then use restorecon to put the labels on disk.
.B semanage fcontext -a -t %(type)s '/srv/%(domainname)s/content(/.*)?'
.br
.B restorecon -R -v /srv/my%(domainname)s_content
Note: SELinux often uses regular expressions to specify labels that match multiple files.
""" % {'domainname': self.domainname, "type": flist[0]})
self.fd.write(r"""
.I The following file types are defined for %(domainname)s:
""" % {'domainname': self.domainname})
for f in flist:
self.fd.write("""
.EX
.PP
.B %s
.EE
- %s
""" % (f, sepolicy.get_description(f)))
if f in self.fcdict:
plural = ""
if len(self.fcdict[f]["regex"]) > 1:
plural = "s"
self.fd.write("""
.br
.TP 5
Path%s:
%s""" % (plural, self.fcdict[f]["regex"][0]))
for x in self.fcdict[f]["regex"][1:]:
self.fd.write(", %s" % x)
self.fd.write("""
.PP
Note: File context can be temporarily modified with the chcon command. If you want to permanently change the file context you need to use the
.B semanage fcontext
command. This will modify the SELinux labeling database. You will need to use
.B restorecon
to apply the labels.
""")
def _see_also(self):
ret = ""
for d in self.domains:
if d == self.domainname:
continue
if d.startswith(self.short_name):
ret += ", %s_selinux(8)" % d
if d.startswith(self.domainname + "_"):
ret += ", %s_selinux(8)" % d
self.fd.write(ret)
def _public_content(self):
if len(self.anon_list) > 0:
self.fd.write("""
.SH SHARING FILES
If you want to share files with multiple domains (Apache, FTP, rsync, Samba), you can set a file context of public_content_t and public_content_rw_t. These contexts allow any of the above domains to read the content. If you want a particular domain to write to the public_content_rw_t domain, you must set the appropriate boolean.
.TP
Allow %(domainname)s servers to read the /var/%(domainname)s directory by adding the public_content_t file type to the directory and by restoring the file type.
.PP
.B
semanage fcontext -a -t public_content_t "/var/%(domainname)s(/.*)?"
.br
.B restorecon -F -R -v /var/%(domainname)s
.PP
.TP
Allow %(domainname)s servers to read and write /var/%(domainname)s/incoming by adding the public_content_rw_t type to the directory and by restoring the file type. You also need to turn on the %(domainname)s_anon_write boolean.
.PP
.B
semanage fcontext -a -t public_content_rw_t "/var/%(domainname)s/incoming(/.*)?"
.br
.B restorecon -F -R -v /var/%(domainname)s/incoming
.br
.B setsebool -P %(domainname)s_anon_write 1
""" % {'domainname': self.domainname})
for b in self.anon_list:
desc = self.booleans_dict[b][2][0].lower() + self.booleans_dict[b][2][1:]
self.fd.write("""
.PP
If you want to %s, you must turn on the %s boolean.
.EX
.B setsebool -P %s 1
.EE
""" % (desc, b, b))
def _footer(self):
self.fd.write("""
.SH "COMMANDS"
.B semanage fcontext
can also be used to manipulate default file context mappings.
.PP
.B semanage permissive
can also be used to manipulate whether or not a process type is permissive.
.PP
.B semanage module
can also be used to enable/disable/install/remove policy modules.
""")
if len(self.ports) > 0:
self.fd.write("""
.B semanage port
can also be used to manipulate the port definitions
""")
if self.booltext != "":
self.fd.write("""
.B semanage boolean
can also be used to manipulate the booleans
""")
self.fd.write("""
.PP
.B system-config-selinux
is a GUI tool available to customize SELinux policy settings.
.SH AUTHOR
This manual page was auto-generated using
.B "sepolicy manpage".
.SH "SEE ALSO"
selinux(8), %s(8), semanage(8), restorecon(8), chcon(1), sepolicy(8)
""" % (self.domainname))
if self.booltext != "":
self.fd.write(", setsebool(8)")
self._see_also()
def _valid_write(self, check, attributes):
if check in [self.type, "domain"]:
return False
if check.endswith("_t"):
for a in attributes:
if a in self.types[check]:
return False
return True
def _entrypoints(self):
entrypoints = [x['target'] for x in sepolicy.search([sepolicy.ALLOW], {'source': self.type, 'permlist': ['entrypoint'], 'class': 'file'})]
if len(entrypoints) == 0:
return
self.fd.write("""
.SH "ENTRYPOINTS"
""")
if len(entrypoints) > 1:
entrypoints_str = "\\fB%s\\fP file types" % ", ".join(entrypoints)
else:
entrypoints_str = "\\fB%s\\fP file type" % entrypoints[0]
self.fd.write("""
The %s_t SELinux type can be entered via the %s.
The default entrypoint paths for the %s_t domain are the following:
""" % (self.domainname, entrypoints_str, self.domainname))
if "bin_t" in entrypoints:
entrypoints.remove("bin_t")
self.fd.write("""
All executeables with the default executable label, usually stored in /usr/bin and /usr/sbin.""")
paths = []
for entrypoint in entrypoints:
if entrypoint in self.fcdict:
paths += self.fcdict[entrypoint]["regex"]
self.fd.write("""
%s""" % ", ".join(paths))
def _mcs_types(self):
mcs_constrained_type = next(sepolicy.info(sepolicy.ATTRIBUTE, "mcs_constrained_type"))
if self.type not in mcs_constrained_type['types']:
return
self.fd.write ("""
.SH "MCS Constrained"
The SELinux process type %(type)s_t is an MCS (Multi Category Security) constrained type. Sometimes this separation is referred to as sVirt. These types are usually used for securing multi-tenant environments, such as virtualization, containers or separation of users. The tools used to launch MCS types pick out a different MCS label for each process group.
For example one process might be launched with %(type)s_t:s0:c1,c2, and another process launched with %(type)s_t:s0:c3,c4. The SELinux kernel only allows these processes to write to content with a matching MCS label, or an MCS label of s0. A process running with the MCS level of s0:c1,c2 is not allowed to write to content with the MCS label of s0:c3,c4.
""" % {'type': self.domainname})
def _writes(self):
permlist = sepolicy.search([sepolicy.ALLOW], {'source': self.type, 'permlist': ['open', 'write'], 'class': 'file'})
if permlist is None or len(permlist) == 0:
return
all_writes = []
attributes = ["proc_type", "sysctl_type"]
for i in permlist:
if not i['target'].endswith("_t"):
attributes.append(i['target'])
for i in permlist:
if self._valid_write(i['target'], attributes):
if i['target'] not in all_writes:
all_writes.append(i['target'])
if len(all_writes) == 0:
return
self.fd.write("""
.SH "MANAGED FILES"
""")
self.fd.write("""
The SELinux process type %s_t can manage files labeled with the following file types. The paths listed are the default paths for these file types. Note that the process UID still needs to have DAC permissions.
""" % self.domainname)
all_writes.sort()
if "file_type" in all_writes:
all_writes = ["file_type"]
for f in all_writes:
self.fd.write("""
.br
.B %s
""" % f)
if f in self.fcdict:
for path in self.fcdict[f]["regex"]:
self.fd.write("""\t%s
.br
""" % path)
def _get_users_range(self):
if self.domainname in self.all_users_range:
return self.all_users_range[self.domainname]
return "s0"
def _user_header(self):
self.fd.write('.TH "%(type)s_selinux" "8" "%(type)s" "mgrepl@redhat.com" "%(type)s SELinux Policy documentation"'
% {'type': self.domainname})
self.fd.write(r"""
.SH "NAME"
%(user)s_u \- \fB%(desc)s\fP - Security Enhanced Linux Policy
.SH DESCRIPTION
\fB%(user)s_u\fP is an SELinux User defined in the SELinux
policy. SELinux users have default roles, \fB%(user)s_r\fP. The
default role has a default type, \fB%(user)s_t\fP, associated with it.
The SELinux user will usually login to a system with a context that looks like:
.B %(user)s_u:%(user)s_r:%(user)s_t:%(range)s
Linux users are automatically assigned an SELinux user at login.
Login programs use the SELinux User to assign initial context to the user's shell.
SELinux policy uses the context to control the user's access.
By default all users are assigned to the SELinux user via the \fB__default__\fP flag.
On Targeted policy systems the \fB__default__\fP user is assigned to the \fBunconfined_u\fP SELinux user.
You can list all Linux User to SELinux user mapping using:
.B semanage login -l
If you wanted to change the default user mapping to use the %(user)s_u user, you would execute:
.B semanage login -m -s %(user)s_u __default__
""" % {'desc': self.desc, 'type': self.type, 'user': self.domainname, 'range': self._get_users_range()})
if "login_userdomain" in self.attributes and "login_userdomain" in self.all_attributes:
self.fd.write("""
If you want to map one Linux user (joe) to the SELinux user %(user)s, you would execute:
.B $ semanage login -a -s %(user)s_u joe
""" % {'user': self.domainname})
def _can_sudo(self):
sudotype = "%s_sudo_t" % self.domainname
self.fd.write("""
.SH SUDO
""")
if sudotype in self.types:
role = self.domainname + "_r"
self.fd.write("""
The SELinux user %(user)s can execute sudo.
You can set up sudo to allow %(user)s to transition to an administrative domain:
Add one or more of the following records to sudoers using visudo.
""" % {'user': self.domainname})
for adminrole in self.role_allows[role]:
self.fd.write("""
USERNAME ALL=(ALL) ROLE=%(admin)s_r TYPE=%(admin)s_t COMMAND
.br
sudo will run COMMAND as %(user)s_u:%(admin)s_r:%(admin)s_t:LEVEL
""" % {'admin': adminrole[:-2], 'user': self.domainname})
self.fd.write("""
You might also need to add one or more of these new roles to your SELinux user record.
List the SELinux roles your SELinux user can reach by executing:
.B $ semanage user -l |grep selinux_name
Modify the roles list and add %(user)s_r to this list.
.B $ semanage user -m -R '%(roles)s' %(user)s_u
For more details see the semanage man page.
""" % {'user': self.domainname, "roles": " ".join([role] + self.role_allows[role])})
else:
self.fd.write("""
The SELinux type %s_t is not allowed to execute sudo.
""" % self.domainname)
def _user_attribute(self):
self.fd.write("""
.SH USER DESCRIPTION
""")
if "unconfined_usertype" in self.attributes:
self.fd.write("""
The SELinux user %s_u is an unconfined user. This means that a Linux user mapped to this SELinux user is allowed to perform all actions.
""" % self.domainname)
if "unpriv_userdomain" in self.attributes:
self.fd.write("""
The SELinux user %s_u is defined in policy as an unprivileged user. SELinux prevents unprivileged users from doing administration tasks without transitioning to a different role.
""" % self.domainname)
if "admindomain" in self.attributes:
self.fd.write("""
The SELinux user %s_u is an admin user. This means that a Linux user mapped to this SELinux user is intended for administrative actions. Usually this is assigned to a root Linux user.
""" % self.domainname)
def _xwindows_login(self):
if "x_domain" in self.all_attributes:
self.fd.write("""
.SH X WINDOWS LOGIN
""")
if "x_domain" in self.attributes:
self.fd.write("""
The SELinux user %s_u is able to log in via X Windows.
""" % self.domainname)
else:
self.fd.write("""
The SELinux user %s_u is not able to log in via X Windows.
""" % self.domainname)
def _terminal_login(self):
if "login_userdomain" in self.all_attributes:
self.fd.write("""
.SH TERMINAL LOGIN
""")
if "login_userdomain" in self.attributes:
self.fd.write("""
The SELinux user %s_u is able to log in from a terminal.
""" % self.domainname)
else:
self.fd.write("""
The SELinux user %s_u is not able to log in from a terminal.
""" % self.domainname)
def _network(self):
from sepolicy import network
self.fd.write("""
.SH NETWORK
""")
for net in ("tcp", "udp"):
portdict = network.get_network_connect(self.type, net, "name_bind")
if len(portdict) > 0:
self.fd.write("""
.TP
The SELinux user %s_u is able to listen on the following %s ports.
""" % (self.domainname, net))
for p in portdict:
for t, ports in portdict[p]:
self.fd.write("""
.B %s
""" % ",".join(ports))
portdict = network.get_network_connect(self.type, "tcp", "name_connect")
if len(portdict) > 0:
self.fd.write("""
.TP
The SELinux user %s_u is able to connect to the following tcp ports.
""" % (self.domainname))
for p in portdict:
for t, ports in portdict[p]:
self.fd.write("""
.B %s
""" % ",".join(ports))
def _home_exec(self):
permlist = sepolicy.search([sepolicy.ALLOW], {'source': self.type, 'target': 'user_home_type', 'class': 'file', 'permlist': ['ioctl', 'read', 'getattr', 'execute', 'execute_no_trans', 'open']})
self.fd.write("""
.SH HOME_EXEC
""")
if permlist is not None:
self.fd.write("""
The SELinux user %s_u is able to execute home content files.
""" % self.domainname)
else:
self.fd.write("""
The SELinux user %s_u is not able to execute home content files.
""" % self.domainname)
def _transitions(self):
self.fd.write(r"""
.SH TRANSITIONS
Three things can happen when %(type)s attempts to execute a program.
\fB1.\fP SELinux Policy can deny %(type)s from executing the program.
.TP
\fB2.\fP SELinux Policy can allow %(type)s to execute the program in the current user type.
Execute the following to see the types that the SELinux user %(type)s can execute without transitioning:
.B sesearch -A -s %(type)s -c file -p execute_no_trans
.TP
\fB3.\fP SELinux can allow %(type)s to execute the program and transition to a new type.
Execute the following to see the types that the SELinux user %(type)s can execute and transition:
.B $ sesearch -A -s %(type)s -c process -p transition
""" % {'user': self.domainname, 'type': self.type})
def _role_header(self):
self.fd.write('.TH "%(user)s_selinux" "8" "%(user)s" "mgrepl@redhat.com" "%(user)s SELinux Policy documentation"'
% {'user': self.domainname})
self.fd.write(r"""
.SH "NAME"
%(user)s_r \- \fB%(desc)s\fP - Security Enhanced Linux Policy
.SH DESCRIPTION
SELinux supports Role Based Access Control (RBAC); some Linux roles are login roles, while other roles need to be transitioned into.
.I Note:
Examples in this man page will use the
.B staff_u
SELinux user.
Non login roles are usually used for administrative tasks. For example, tasks that require root privileges. Roles control which types a user can run processes with. Roles often have default types assigned to them.
The default type for the %(user)s_r role is %(user)s_t.
The
.B newrole
program can be used to transition directly to this role.
.B newrole -r %(user)s_r -t %(user)s_t
.B sudo
is the preferred method to transition from one role to another. You set up sudo to transition to %(user)s_r by adding a similar line to the /etc/sudoers file.
USERNAME ALL=(ALL) ROLE=%(user)s_r TYPE=%(user)s_t COMMAND
.br
sudo will run COMMAND as staff_u:%(user)s_r:%(user)s_t:LEVEL
When using a non login role, you need to set up SELinux so that your SELinux user can reach the %(user)s_r role.
Execute the following to see all of the assigned SELinux roles:
.B semanage user -l
You need to add %(user)s_r to the staff_u user. You could set up the staff_u user to be able to use the %(user)s_r role with a command like:
.B $ semanage user -m -R 'staff_r system_r %(user)s_r' staff_u
""" % {'desc': self.desc, 'user': self.domainname})
troles = []
for i in self.role_allows:
if self.domainname + "_r" in self.role_allows[i]:
troles.append(i)
if len(troles) > 0:
plural = ""
if len(troles) > 1:
plural = "s"
self.fd.write("""
SELinux policy also controls which roles can transition to a different role.
You can list these rules using the following command.
.B search --role_allow
SELinux policy allows the %s role%s to transition to the %s_r role.
""" % (", ".join(troles), plural, self.domainname))
|
gpl-2.0
| -2,246,087,477,278,939,100
| 30.686244
| 361
| 0.598971
| false
| 3.405199
| false
| false
| false
|
gonicus/gosa
|
backend/src/gosa/backend/objects/index.py
|
1
|
70746
|
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
"""
Object Index
============
The Object Index is the search engine in GOsa. It keeps track of
all defined object types and can find references to them inside its
local index database.
----
"""
import logging
import multiprocessing
import sys
import re
import traceback
from multiprocessing.pool import Pool
from urllib.parse import urlparse
import ldap
import sqlalchemy
from multiprocessing import RLock
from passlib.hash import bcrypt
from requests import HTTPError
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import CreateTable
from sqlalchemy.sql.ddl import DropTable
from sqlalchemy_searchable import make_searchable, search
from sqlalchemy_utils import TSVectorType
import gosa
from gosa.backend.components.httpd import get_server_url, get_internal_server_url
from gosa.backend.objects.backend.back_foreman import ForemanBackendException
from gosa.backend.utils import BackendTypes
from gosa.common.env import declarative_base, make_session
from gosa.common.event import EventMaker
from lxml import etree
from lxml import objectify
import zope.event
import datetime
import hashlib
import time
import itertools
from gosa.backend.routes.sse.main import SseHandler
from zope.interface import implementer
from gosa.common import Environment
from gosa.common.mqtt_connection_state import BusClientAvailability
from gosa.common.utils import N_
from gosa.common.handler import IInterfaceHandler
from gosa.common.components import Command, Plugin, PluginRegistry, JSONServiceProxy
from gosa.common.error import GosaErrorHandler as C, GosaException
from gosa.backend.objects import ObjectFactory, ObjectProxy, ObjectChanged
from gosa.backend.exceptions import FilterException, IndexException, ProxyException, ObjectException
from gosa.backend.lock import GlobalLock
from sqlalchemy.orm import relationship, subqueryload
from sqlalchemy import Column, String, Integer, Boolean, Sequence, DateTime, ForeignKey, or_, and_, not_, func, orm, \
JSON, Enum
from gosa.backend.routes.system import State
Base = declarative_base()
make_searchable(Base.metadata)
# Register the errors handled by us
C.register_codes(dict(
OBJECT_EXISTS=N_("Object with UUID %(uuid)s already exists"),
OBJECT_NOT_FOUND=N_("Cannot find object %(id)s"),
INDEXING=N_("Index rebuild in progress - try again later"),
NOT_SUPPORTED=N_("Requested search operator %(operator)s is not supported"),
NO_MASTER_BACKEND_FOUND=N_("No master backend found"),
NO_MASTER_BACKEND_CONNECTION=N_("connection to GOsa backend failed"),
NO_BACKEND_CREDENTIALS=N_("Please add valid backend credentials to you configuration (core.backend-user, core.backend-key)"),
DELAYED_UPDATE_FOR_NON_DIRTY_OBJECT=N_("Trying to add a delayed update to a non-dirty object (%(topic)s)")
))
class Schema(Base):
__tablename__ = 'schema'
type = Column(String, primary_key=True)
hash = Column(String(32))
def __repr__(self): # pragma: nocover
return "<Schema(type='%s', hash='%s')>" % (self.type, self.hash)
class SearchObjectIndex(Base):
__tablename__ = "so_index"
so_uuid = Column(String(36), ForeignKey('obj-index.uuid'), primary_key=True)
reverse_parent_dn = Column(String, index=True)
title = Column(String)
description = Column(String)
search = Column(String)
types = Column(String)
search_vector = Column(TSVectorType('title', 'description', 'search', 'types',
weights={'title': 'A', 'types': 'D', 'description': 'C', 'search': 'B'},
regconfig='pg_catalog.simple'
))
object = relationship("ObjectInfoIndex", uselist=False, back_populates="search_object")
def __repr__(self): # pragma: nocover
return "<SearchObjectIndex(so_uuid='%s', reverse_parent_dn='%s', title='%s', description='%s')>" % \
(self.so_uuid, self.reverse_parent_dn, self.title, self.description)
class KeyValueIndex(Base):
__tablename__ = 'kv-index'
key_id = Column(Integer, Sequence('kv_id_seq'), primary_key=True, nullable=False)
uuid = Column(String(36), ForeignKey('obj-index.uuid'))
key = Column(String(64), index=True)
value = Column(String)
def __repr__(self): # pragma: nocover
return "<KeyValueIndex(uuid='%s', key='%s', value='%s')>" % (self.uuid, self.key, self.value)
class ExtensionIndex(Base):
__tablename__ = 'ext-index'
ext_id = Column(Integer, Sequence('ei_id_seq'), primary_key=True, nullable=False)
uuid = Column(String(36), ForeignKey('obj-index.uuid'))
extension = Column(String(64))
def __repr__(self): # pragma: nocover
return "<ExtensionIndex(uuid='%s', extension='%s')>" % (
self.uuid, self.extension)
class ObjectInfoIndex(Base):
__tablename__ = 'obj-index'
uuid = Column(String(36), primary_key=True)
dn = Column(String, index=True)
_parent_dn = Column(String, index=True)
_adjusted_parent_dn = Column(String, index=True)
_type = Column(String(64), index=True)
_last_modified = Column(DateTime)
_invisible = Column(Boolean)
_master_backend = Column(String)
properties = relationship("KeyValueIndex", order_by=KeyValueIndex.key)
extensions = relationship("ExtensionIndex", order_by=ExtensionIndex.extension)
search_object = relationship("SearchObjectIndex", back_populates="object")
def __repr__(self): # pragma: nocover
return "<ObjectInfoIndex(uuid='%s', dn='%s', _parent_dn='%s', _adjusted_parent_dn='%s', _type='%s', _last_modified='%s', _invisible='%s', _master_backend='%s')>" % (
self.uuid, self.dn, self._parent_dn, self._adjusted_parent_dn, self._type, self._last_modified, self._invisible, self._master_backend)
class RegisteredBackend(Base):
__tablename__ = "registered-backends"
uuid = Column(String(36), primary_key=True, nullable=False)
password = Column(String(300), nullable=False)
url = Column(String)
type = Column(Enum(BackendTypes))
def __init__(self, uuid, password, url="", type=BackendTypes.unknown):
self.uuid = uuid
self.password = bcrypt.encrypt(password)
self.url = url
self.type = type
def validate_password(self, password):
return bcrypt.verify(password, self.password)
def __repr__(self): # pragma: nocover
return "<RegisteredBackend(uuid='%s', password='%s', url='%s', type='%s')>" % \
(self.uuid, self.password, self.url, self.type)
class OpenObject(Base):
__tablename__ = "open-objects"
ref = Column(String(36), primary_key=True, nullable=False)
uuid = Column(String(36), nullable=True)
oid = Column(String)
data = Column(JSON)
backend_uuid = Column(String, ForeignKey('registered-backends.uuid'))
backend = relationship("RegisteredBackend")
created = Column(DateTime)
last_interaction = Column(DateTime)
user = Column(String)
session_id = Column(String)
def __repr__(self): # pragma: nocover
return "<OpenObject(ref='%s', uuid='%s', oid='%s', data='%s', backend='%s', created='%s', last_interaction='%s', user='%s', session_id='%s')>" % \
(self.ref, self.uuid, self.oid, self.data, self.backend, self.created, self.last_interaction, self.user, self.session_id)
class UserSession(Base):
__tablename__ = "user-sessions"
sid = Column(String(36), primary_key=True, nullable=False)
user = Column(String)
dn = Column(String)
last_used = Column(DateTime)
auth_state = Column(Integer)
def __repr__(self):
return "<UserSession(sid='%s', user='%s', dn='%s', auth_state='%s', last_used='%s')>" % \
(self.sid, self.user, self.dn, self.auth_state, self.last_used)
class Cache(Base):
__tablename__ = "cache"
key = Column(String, primary_key=True)
data = Column(JSON)
time = Column(DateTime)
def __repr__(self):
return "<Cache(key='%s',data='%s',time='%s')" % (self.key, self.data, self.time)
@compiles(DropTable, "postgresql")
def _compile_drop_table(element, compiler, **kwargs):
return compiler.visit_drop_table(element) + " CASCADE"
class IndexScanFinished(): # pragma: nocover
pass
class IndexSyncFinished(): # pragma: nocover
pass
@implementer(IInterfaceHandler)
class ObjectIndex(Plugin):
"""
The *ObjectIndex* keeps track of objects and their indexed attributes. It
is the search engine that allows quick queries on the data set with
paged results and wildcards.
"""
fuzzy = False
db = None
base = None
_priority_ = 20
_target_ = 'core'
_indexed = False
_post_process_job = None
importing = False
to_be_updated = []
# objects that are currently being created (stored in the backend but not in the database yet)
currently_in_creation = []
# objects that have been changed (changes not yet in the database)
__dirty = {}
currently_moving = {}
__search_aid = {}
last_notification = None
# notification period in seconds during indexing
notify_every = 1
__value_extender = None
_acl_resolver = None
procs = multiprocessing.cpu_count()
def __init__(self):
self.env = Environment.getInstance()
# Remove old lock if exists
if GlobalLock.exists("scan_index"):
GlobalLock.release("scan_index")
self.log = logging.getLogger(__name__)
self.log.info("initializing object index handler")
self.factory = ObjectFactory.getInstance()
# Listen for object events
zope.event.subscribers.append(self.__handle_events)
self.lock = RLock()
def serve(self):
# Configure database for the index
orm.configure_mappers()
engine = self.env.getDatabaseEngine("backend-database")
Base.metadata.bind = engine
Base.metadata.create_all()
self.__value_extender = gosa.backend.objects.renderer.get_renderers()
self._acl_resolver = PluginRegistry.getInstance("ACLResolver")
if self.env.mode == "backend":
with make_session() as session:
# create view
try:
# check if extension exists
if session.execute("SELECT * FROM \"pg_extension\" WHERE extname = 'pg_trgm';").rowcount == 0:
session.execute("CREATE EXTENSION pg_trgm;")
if session.execute("SELECT * FROM \"pg_extension\" WHERE extname = 'fuzzystrmatch';").rowcount == 0:
session.execute("CREATE EXTENSION fuzzystrmatch;")
view_name = "unique_lexeme"
# check if view exists
res = session.execute("SELECT count(*) > 0 as \"exists\" FROM pg_catalog.pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind = 'm' AND n.nspname = 'public' AND c.relname = '%s';" % view_name).first()
if res[0] is False:
session.execute("CREATE MATERIALIZED VIEW %s AS SELECT word FROM ts_stat('SELECT so_index.search_vector FROM so_index');" % view_name)
session.execute("CREATE INDEX words_idx ON %s USING gin(word gin_trgm_ops);" % view_name)
self.fuzzy = True
except Exception as e:
self.log.error("Error creating view for unique word index: %s" % str(e))
session.rollback()
try:
current_db_hash = session.query(Schema).filter(Schema.type == 'database').one_or_none()
except:
current_db_hash = None
# check DB schema
tables = [Schema.__table__, KeyValueIndex.__table__, ExtensionIndex.__table__,
SearchObjectIndex.__table__, ObjectInfoIndex.__table__, RegisteredBackend.__table__]
sql = ""
for table in tables:
statement = CreateTable(table)
sql += str(statement.compile(dialect=postgresql.dialect()))
md5s = hashlib.md5()
md5s.update(sql.encode('utf-8'))
md5sum = md5s.hexdigest()
db_recreated = False
schema = self.factory.getXMLObjectSchema(True)
if current_db_hash is None or current_db_hash.hash != md5sum:
# Database schema has changed -> re-create
self.log.info("database schema has changed, dropping object tables")
session.commit()
Base.metadata.drop_all()
Base.metadata.create_all()
self.log.info("created new database tables")
db_schema = Schema(type='database', hash=md5sum)
session.add(db_schema)
session.commit()
# enable indexing
self.env.backend_index = True
db_recreated = True
else:
# If there is already a collection, check if there is a newer schema available
if self.isSchemaUpdated(schema):
session.query(Schema).filter(Schema.type != 'database').delete()
session.query(KeyValueIndex).delete()
session.query(ExtensionIndex).delete()
session.query(SearchObjectIndex).delete()
session.query(ObjectInfoIndex).delete()
session.query(OpenObject).delete() # delete references to backends
self.log.info('object definitions changed, dropped old object index')
# enable indexing
self.env.backend_index = True
# delete the old active master (not the proxies)
session.query(RegisteredBackend).filter(RegisteredBackend.type == 'active_master').delete()
# Create the initial schema information if required
if not session.query(Schema).filter(Schema.type == 'objects').one_or_none():
self.log.info('created schema')
md5s = hashlib.md5()
md5s.update(schema)
md5sum = md5s.hexdigest()
schema = Schema(type='objects', hash=md5sum)
session.add(schema)
session.commit()
# Extract search aid
attrs = {}
mapping = {}
resolve = {}
aliases = {}
for otype in self.factory.getObjectTypes():
# Assemble search aid
item = self.factory.getObjectSearchAid(otype)
if not item:
continue
typ = item['type']
aliases[typ] = [typ]
if not typ in attrs:
attrs[typ] = []
if not typ in resolve:
resolve[typ] = []
if not typ in mapping:
mapping[typ] = dict(dn="dn", title="title", description="description", icon=None)
attrs[typ] += item['search']
if 'keyword' in item:
aliases[typ] += item['keyword']
if 'map' in item:
mapping[typ].update(item['map'])
if 'resolve' in item:
resolve[typ] += item['resolve']
# Add index for attribute used for filtering and memorize
# attributes potentially needed for queries.
tmp = [x for x in attrs.values()]
used_attrs = list(itertools.chain.from_iterable(tmp))
used_attrs += list(itertools.chain.from_iterable([x.values() for x in mapping.values()]))
used_attrs += list(set(itertools.chain.from_iterable([[x[0]['filter'], x[0]['attribute']] for x in resolve.values() if len(x)])))
used_attrs = list(set(used_attrs))
# Remove potentially not assigned values
used_attrs = [u for u in used_attrs if u]
# Memorize search information for later use
self.__search_aid = dict(attrs=attrs,
used_attrs=used_attrs,
mapping=mapping,
resolve=resolve,
aliases=aliases)
# store core_uuid/core_key into DB
if hasattr(self.env, "core_uuid"):
if self.env.mode == "backend":
with make_session() as session:
if db_recreated is False:
tables_to_recreate = [UserSession.__table__, OpenObject.__table__]
for table in tables_to_recreate:
table.drop(engine)
Base.metadata.create_all(tables=tables_to_recreate)
rb = RegisteredBackend(
uuid=self.env.core_uuid,
password=self.env.core_key,
url=get_server_url(),
type=BackendTypes.active_master
)
session.add(rb)
session.commit()
else:
self.registerProxy()
# Schedule index sync
if self.env.backend_index is True and self.env.mode == 'backend':
if not hasattr(sys, '_called_from_test'):
sobj = PluginRegistry.getInstance("SchedulerService")
sobj.getScheduler().add_date_job(self.syncIndex,
datetime.datetime.now() + datetime.timedelta(seconds=1),
tag='_internal', jobstore='ram')
else:
def finish():
zope.event.notify(IndexScanFinished())
zope.event.notify(IndexSyncFinished())
State.system_state = "ready"
sobj = PluginRegistry.getInstance("SchedulerService")
sobj.getScheduler().add_date_job(finish,
datetime.datetime.now() + datetime.timedelta(seconds=10),
tag='_internal', jobstore='ram')
def registerProxy(self, backend_uuid=None):
if self.env.mode == "proxy":
# register on the current master
with make_session() as session:
# get any other registered backend
if backend_uuid is None:
master_backend = session.query(RegisteredBackend) \
.filter(RegisteredBackend.uuid != self.env.core_uuid,
RegisteredBackend.type == BackendTypes.active_master).first()
else:
master_backend = session.query(RegisteredBackend) \
.filter(RegisteredBackend.uuid == backend_uuid,
RegisteredBackend.type == BackendTypes.active_master).first()
if master_backend is None:
raise GosaException(C.make_error("NO_MASTER_BACKEND_FOUND"))
# Try to log in with provided credentials
url = urlparse("%s/rpc" % master_backend.url)
connection = '%s://%s%s' % (url.scheme, url.netloc, url.path)
proxy = JSONServiceProxy(connection)
if self.env.config.get("core.backend-user") is None or self.env.config.get("core.backend-key") is None:
raise GosaException(C.make_error("NO_BACKEND_CREDENTIALS"))
# Try to log in
try:
if not proxy.login(self.env.config.get("core.backend-user"), self.env.config.get("core.backend-key")):
raise GosaException(C.make_error("NO_MASTER_BACKEND_CONNECTION"))
else:
proxy.registerBackend(self.env.core_uuid,
self.env.core_key, get_internal_server_url(),
BackendTypes.proxy)
except HTTPError as e:
if e.code == 401:
raise GosaException(C.make_error("NO_MASTER_BACKEND_CONNECTION"))
else:
self.log.error("Error: %s " % str(e))
raise GosaException(C.make_error("NO_MASTER_BACKEND_CONNECTION"))
# except Exception as e:
# self.log.error("Error: %s " % str(e))
# raise GosaException(C.make_error("NO_MASTER_BACKEND_CONNECTION"))
def stop(self):
if self.__handle_events in zope.event.subscribers:
zope.event.subscribers.remove(self.__handle_events)
def mark_as_dirty(self, obj):
"""
Marks an object as "dirty". Dirty objects are currently being persisted to their backends (aka committed).
:param obj:
:type obj: gosa.backend.proxy.ObjectProxy
:return:
"""
if not self.is_dirty(obj.uuid):
self.__dirty[obj.uuid] = {"obj": obj, "updates": []}
self.log.info("marked %s (%s) as dirty (%s)" % (obj.uuid, obj.dn, self.__dirty))
def is_dirty(self, uuid):
"""
Check if an object identified by UUID is marked as "dirty".
:param uuid: UUID of the object to check
:type uuid: str
:return: True if "dirty"
"""
return uuid in self.__dirty
def get_dirty_objects(self):
return self.__dirty
def add_delayed_update(self, obj, update, inject=False, skip_backend_writes=[]):
"""
Add a delayed update for an object that is currently being committed (marked "dirty").
This update will be processed after the ongoing commit has been completed.
:param obj: The object to apply the update to
:type obj: gosa.backend.proxy.ObjectProxy
:param update: updated data that can be processed by :meth:`gosa.backend.proxy.ObjectProxy.apply_update`
:type update: dict
"""
if not self.is_dirty(obj.uuid):
self.log.warning("Trying to add a delayed update to a non-dirty object '%s'" % obj.uuid)
obj.apply_update(update)
obj.commit(skip_backend_writes=skip_backend_writes)
return
self.log.info("adding delayed update to %s (%s)" % (obj.uuid, obj.dn))
self.__dirty[obj.uuid]["updates"].append({
"inject": inject,
"data": update,
"skip_backend_writes": skip_backend_writes
})
def unmark_as_dirty(self, id):
"""
removes the "dirty" mark for the object and processes the delayed updates
:param id: UUID of the Object to unmark or ObjectProxy instance
:type id: str|ObjectProxy
"""
if isinstance(id, ObjectProxy):
uuid = id.uuid
else:
uuid = id
if self.is_dirty(uuid):
obj = self.__dirty[uuid]['obj']
if len(self.__dirty[uuid]['updates']) > 0:
# freshly open the object
entry = self.__dirty[uuid]
new_obj = ObjectProxy(entry["obj"].dn)
for update in entry["updates"]:
if update["inject"] is True:
self.log.info("injecting %s to %s" % (update["data"], obj.uuid))
new_obj.inject_backend_data(update["data"], force_update=True)
else:
self.log.info("applying %s to %s" % (update["data"], obj.uuid))
new_obj.apply_update(update["data"])
del self.__dirty[uuid]
new_obj.commit(skip_backend_writes=entry["skip_backend_writes"])
else:
del self.__dirty[uuid]
self.log.info("unmarked %s (%s) as dirty (%s)" % (obj.uuid, obj.dn, self.__dirty))
def is_currently_moving(self, dn, move_target=False):
if move_target:
# check for value (the new dn after movement)
return dn in self.currently_moving.values()
else:
# check for key (the old dn before the movement)
return dn in self.currently_moving.keys()
def __backend_change_processor(self, data):
"""
This method gets called if an external backend reports
a modification of an entry under its hood.
We use it to update / create / delete existing index
entries.
"""
data = data.BackendChange
dn = data.DN.text if hasattr(data, 'DN') else None
new_dn = data.NewDN.text if hasattr(data, 'NewDN') else None
change_type = data.ChangeType.text
_uuid = data.UUID.text if hasattr(data, 'UUID') else None
_last_changed = datetime.datetime.strptime(data.ModificationTime.text, "%Y%m%d%H%M%SZ")
obj = None
if not _uuid and not dn:
return
# Set importing flag to true in order to be able to post process incoming
# objects.
ObjectIndex.importing = True
# Setup or refresh timer job to run the post processing
sched = PluginRegistry.getInstance("SchedulerService").getScheduler()
next_run = datetime.datetime.now() + datetime.timedelta(0, 5)
if not hasattr(sys, '_called_from_test'):
if self._post_process_job:
sched.reschedule_date_job(self._post_process_job, next_run)
else:
self._post_process_job = sched.add_date_job(self._post_process_by_timer, next_run, tag='_internal', jobstore="ram", )
# Resolve dn from uuid if needed
with make_session() as session:
if not dn:
dn = session.query(ObjectInfoIndex.dn).filter(ObjectInfoIndex.uuid == _uuid).one_or_none()
# Modification
if change_type == "modify":
# Get object
obj = self._get_object(dn)
if not obj:
return
# Check if the entry exists - if not, maybe create it
entry = session.query(ObjectInfoIndex.dn).filter(
or_(
ObjectInfoIndex.uuid == _uuid,
func.lower(ObjectInfoIndex.dn) == func.lower(dn)
)).one_or_none()
if entry:
self.update(obj, session=session)
else:
self.insert(obj, session=session)
# Add
if change_type == "add":
# Get object
obj = self._get_object(dn)
if not obj:
return
self.insert(obj, session=session)
# Delete
if change_type == "delete":
self.log.info("object has changed in backend: indexing %s" % dn)
self.log.warning("external delete might not take care about references")
if _uuid is not None:
self.remove_by_uuid(_uuid, session=session)
else:
obj = self._get_object(dn)
if obj is None:
# lets see if we can find a UUID for the deleted DN
uuid = session.query(ObjectInfoIndex.uuid).filter(func.lower(ObjectInfoIndex.dn) == func.lower(dn)).one_or_none()
if uuid is not None:
self.remove_by_uuid(uuid)
else:
self.remove(obj)
# Move
if change_type in ['modrdn', 'moddn']:
# Check if the entry exists - if not, maybe create it
entry = session.query(ObjectInfoIndex).filter(
or_(
ObjectInfoIndex.uuid == _uuid,
func.lower(ObjectInfoIndex.dn) == func.lower(dn)
)).one_or_none()
if new_dn is not None and new_dn[-1:] == ",":
# only new RDN received, get parent from db
if entry is not None:
new_dn = new_dn + entry._parent_dn
else:
self.log.error('DN modification event received: could not get parent DN from existing object to complete the new DN')
# Get object
obj = self._get_object(new_dn)
if not obj:
return
if entry:
self.update(obj)
else:
self.insert(obj)
# send the event to the clients
event_change_type = "update"
if change_type == "add":
event_change_type = "create"
elif change_type == "delete":
event_change_type = "remove"
e = EventMaker()
if obj:
ev = e.Event(e.ObjectChanged(
e.UUID(obj.uuid),
e.DN(obj.dn),
e.ModificationTime(_last_changed.strftime("%Y%m%d%H%M%SZ")),
e.ChangeType(event_change_type)
))
elif _uuid is not None:
ev = e.Event(e.ObjectChanged(
e.UUID(_uuid),
e.DN(dn),
e.ModificationTime(_last_changed.strftime("%Y%m%d%H%M%SZ")),
e.ChangeType(event_change_type)
))
else:
ev = e.Event(e.ObjectChanged(
e.DN(dn),
e.ModificationTime(_last_changed.strftime("%Y%m%d%H%M%SZ")),
e.ChangeType(event_change_type)
))
event = "<?xml version='1.0'?>\n%s" % etree.tostring(ev, pretty_print=True).decode('utf-8')
# Validate event
xml = objectify.fromstring(event, PluginRegistry.getEventParser())
SseHandler.notify(xml, channel="broadcast")
if hasattr(sys, '_called_from_test'):
self.post_process()
def get_last_modification(self, backend='LDAP'):
with make_session() as session:
res = session.query(ObjectInfoIndex._last_modified)\
.filter(ObjectInfoIndex._master_backend == backend)\
.order_by(ObjectInfoIndex._last_modified.desc())\
.limit(1)\
.one_or_none()
if res is not None:
return res[0]
return None
def _post_process_by_timer(self):
self._post_process_job = None
self.post_process()
def _get_object(self, dn):
try:
obj = ObjectProxy(dn)
except (ProxyException, ldap.NO_SUCH_OBJECT) as e:
self.log.warning("not found %s: %s" % (dn, str(e)))
obj = None
except ObjectException as e:
self.log.warning("not indexing %s: %s" % (dn, str(e)))
obj = None
return obj
def get_search_aid(self):
return self.__search_aid
def isSchemaUpdated(self, schema):
# Calculate md5 checksum for potentially new schema
md5s = hashlib.md5()
md5s.update(schema)
md5sum = md5s.hexdigest()
with make_session() as session:
stored_md5sum = session.query(Schema.hash).filter(Schema.type == 'objects').one_or_none()
if stored_md5sum and stored_md5sum[0] == md5sum:
return False
return True
def notify_frontends(self, state, progress=None, step=None):
e = EventMaker()
ev = e.Event(e.BackendState(
e.Type("index"),
e.State(state),
e.Progress(str(progress)),
e.Step(str(step)),
e.TotalSteps(str(4))
))
event_object = objectify.fromstring(etree.tostring(ev, pretty_print=True).decode('utf-8'))
SseHandler.notify(event_object, channel="broadcast")
@Command(__help__=N_('Start index synchronizing from an optional root-DN'))
def syncIndex(self, base=None):
State.system_state = "indexing"
# Don't index if someone else is already doing it
if GlobalLock.exists("scan_index"):
return
# Don't run index, if someone else already did until the last
# restart.
cr = PluginRegistry.getInstance("CommandRegistry")
GlobalLock.acquire("scan_index")
ObjectIndex.importing = True
updated = 0
added = 0
existing = 0
total = 0
index_successful = False
t0 = time.time()
if base is None:
start_dn = self.env.base
else:
start_dn = base
try:
self._indexed = True
self.last_notification = time.time()
self.log.info("scanning for objects")
self.notify_frontends(N_("scanning for objects"), step=1)
with Pool(processes=self.procs) as pool:
children = self.factory.getObjectChildren(start_dn)
result = pool.starmap_async(resolve_children, [(dn,) for dn in children.keys()])
while not result.ready():
self.notify_frontends(N_("scanning for objects"), step=1)
self.last_notification = time.time()
time.sleep(self.notify_every)
res = children
for r in result.get():
res = {**res, **r}
# count by type
counts = {}
for o in res.keys():
if res[o] not in counts:
counts[res[o]] = 1
else:
counts[res[o]] += 1
self.log.info("Found objects: %s" % counts)
res[self.env.base] = 'dummy'
self.log.info("generating object index")
self.notify_frontends(N_("Generating object index"))
# Find new entries
backend_objects = []
total = len(res)
oids = sorted(res.keys(), key=len)
with Pool(processes=self.procs) as pool:
self.log.info("processing objects with %d entries" % len(oids))
result = pool.starmap_async(process_objects, [(oid,) for oid in oids], chunksize=1)
while not result.ready():
now = time.time()
current = total-result._number_left
self.notify_frontends(N_("Processing object %s/%s" % (current, total)), round(100/total*current), step=2)
self.last_notification = now
time.sleep(self.notify_every)
for r, uuid, to_be_updated in result.get():
backend_objects.append(uuid)
ObjectIndex.to_be_updated.extend(to_be_updated)
if r == "added":
added += 1
elif r == "existing":
existing += 1
elif r == "updated":
updated += 1
self.notify_frontends(N_("%s objects processed" % total), 100, step=2)
# Remove entries that are in the index, but not in any other backends
if base is None:
self.notify_frontends(N_("removing orphan objects from index"), step=3)
with make_session() as session:
removed = self.__remove_others(backend_objects, session=session)
else:
removed = 0
self.log.info("%s added, %s updated, %s removed, %s are up-to-date" % (added, updated, removed, existing))
index_successful = True
except Exception as e:
self.log.critical("building the index failed: %s" % str(e))
traceback.print_exc()
finally:
if index_successful is True:
self.post_process()
self.log.info("index refresh finished")
self.notify_frontends(N_("Index refresh finished"), 100, step=4)
GlobalLock.release("scan_index")
t1 = time.time()
self.log.info("processed %d objects in %ds" % (total, t1 - t0))
# notify others that the index scan is done, they can now do their own sync processing
zope.event.notify(IndexScanFinished())
# now the index is really ready and up-to-date
zope.event.notify(IndexSyncFinished())
State.system_state = "ready"
else:
raise IndexException("Error creating index, please restart.")
def post_process(self):
ObjectIndex.importing = False
self.last_notification = time.time()
uuids = list(set(ObjectIndex.to_be_updated))
ObjectIndex.to_be_updated = []
total = len(uuids)
# Some object may have queued themselves to be re-indexed, process them now.
self.log.info("need to refresh index for %d objects" % total)
with Pool(processes=self.procs) as pool:
result = pool.starmap_async(post_process, [(uuid,) for uuid in uuids], chunksize=1)
while not result.ready():
now = time.time()
current = total-result._number_left
if GlobalLock.exists("scan_index"):
self.notify_frontends(N_("Refreshing object %s/%s" % (current, total)), round(100/total*current), step=4)
self.last_notification = now
time.sleep(self.notify_every)
if len(ObjectIndex.to_be_updated):
self.post_process()
self.update_words()
def index_active(self): # pragma: nocover
return self._indexed
def update_words(self, session=None):
if session is None:
with make_session() as session:
self._update_words(session)
else:
self._update_words(session)
def _update_words(self, session):
# update unique word list
if self.fuzzy is True:
try:
session.execute("REFRESH MATERIALIZED VIEW unique_lexeme;")
except Exception as e:
session.rollback()
raise e
def __handle_events(self, event, retried=0):
if GlobalLock.exists("scan_index"):
return
if isinstance(event, objectify.ObjectifiedElement):
self.__backend_change_processor(event)
elif isinstance(event, ObjectChanged):
change_type = None
_uuid = event.uuid
_dn = None
_last_changed = datetime.datetime.now()
# Try to find the affected DN
with make_session() as session:
e = session.query(ObjectInfoIndex).filter(ObjectInfoIndex.uuid == _uuid).one_or_none()
if e:
# New pre-events don't have a dn. Just skip it in this case...
if hasattr(e, 'dn'):
_dn = e.dn
if e._last_modified is not None:
_last_changed = e._last_modified
else:
_dn = "not known yet"
if event.reason == "post object remove":
self.log.debug("removing object index for %s (%s)" % (_uuid, _dn))
self.remove_by_uuid(_uuid, session=session)
change_type = "remove"
if event.reason == "pre object move":
self.log.debug("starting object movement from %s to %s" % (_dn, event.dn))
self.currently_moving[_dn] = event.dn
try:
if event.reason == "post object move":
self.log.debug("updating object index for %s (%s)" % (_uuid, _dn))
obj = ObjectProxy(event.dn, skip_value_population=True)
self.update(obj, session=session)
_dn = obj.dn
change_type = "move"
if event.orig_dn in self.currently_moving:
del self.currently_moving[event.orig_dn]
if event.reason == "post object create":
self.log.debug("creating object index for %s (%s)" % (_uuid, _dn))
obj = ObjectProxy(event.dn, skip_value_population=True)
self.insert(obj, session=session)
_dn = obj.dn
change_type = "create"
if event.reason == "post object update":
self.log.debug("updating object index for %s (%s)" % (_uuid, _dn))
if not event.dn and _dn != "not known yet":
event.dn = _dn
obj = ObjectProxy(event.dn, skip_value_population=True)
self.update(obj, session=session)
change_type = "update"
except ForemanBackendException as e:
if e.response.status_code == 404:
self.log.info("Foreman object %s (%s) not available yet, skipping index update."
% (_uuid, _dn))
# do nothing else as foreman will send some kind of event, when the object becomes available
else:
raise e
# send the event to the clients
e = EventMaker()
if event.reason[0:4] == "post" and _uuid and _dn and change_type and \
(change_type != "update" or len(event.changed_props)):
ev = e.Event(e.ObjectChanged(
e.UUID(_uuid),
e.DN(_dn),
e.ModificationTime(_last_changed.strftime("%Y%m%d%H%M%SZ")),
e.ChangeType(change_type)
))
event_string = "<?xml version='1.0'?>\n%s" % etree.tostring(ev, pretty_print=True).decode('utf-8')
# Validate event
xml = objectify.fromstring(event_string, PluginRegistry.getEventParser())
SseHandler.notify(xml, channel="broadcast")
elif isinstance(event, BusClientAvailability):
backend_registry = PluginRegistry.getInstance("BackendRegistry")
if event.type == "proxy":
# entering proxies are not handled, because they register themselves with credentials via JSONRPC
if event.state == "leave":
self.log.debug("unregistering proxy: %s" % event.client_id)
backend_registry.unregisterBackend(event.client_id)
elif event.type == "backend":
if event.state == "ready":
self.log.debug("new backend announced: %s" % event.client_id)
if self.env.mode == "proxy":
# register ourselves to this backend
self.registerProxy(event.client_id)
def insert(self, obj, skip_base_check=False, session=None):
if session is not None:
self._insert(obj, session, skip_base_check=skip_base_check)
else:
with make_session() as session:
self._insert(obj, session, skip_base_check=skip_base_check)
def _insert(self, obj, session, skip_base_check=False):
if not skip_base_check:
pdn = session.query(ObjectInfoIndex.dn).filter(ObjectInfoIndex.dn == obj.get_parent_dn()).one_or_none()
# No parent?
if not pdn:
self.log.debug("ignoring object that has no base in the current index: " + obj.dn)
return
parent = self._get_object(obj.get_parent_dn())
if not parent.can_host(obj.get_base_type()):
self.log.debug("ignoring object that is not relevant for the index: " + obj.dn)
return
self.log.debug("creating object index for %s (%s)" % (obj.uuid, obj.dn))
uuid = session.query(ObjectInfoIndex.uuid).filter(ObjectInfoIndex.uuid == obj.uuid).one_or_none()
if uuid:
raise IndexException(C.make_error('OBJECT_EXISTS', "base", uuid=obj.uuid))
with self.lock:
self.__save(obj.asJSON(True, use_in_value=True), session=session)
def __save(self, data, session=None):
if self.env.mode == "proxy":
self.log.error("GOsa proxy is not allowed to write anything to the database")
if session is not None:
self.__session_save(data, session)
else:
with make_session() as session:
self.__session_save(data, session)
def __session_save(self, data, session):
try:
# Assemble object index object
oi = ObjectInfoIndex(
uuid=data["_uuid"],
dn=data["dn"],
_type=data["_type"],
_parent_dn=data["_parent_dn"],
_adjusted_parent_dn=data["_adjusted_parent_dn"],
_invisible=data["_invisible"],
_master_backend=data["_master_backend"]
)
if '_last_changed' in data:
oi._last_modified = datetime.datetime.fromtimestamp(data["_last_changed"])
session.add(oi)
# Assemble extension index objects
for ext in data["_extensions"]:
ei = ExtensionIndex(uuid=data["_uuid"], extension=ext)
session.add(ei)
# Assemble key value index objects
for key, value in data.items():
# Skip meta information and DN
if key.startswith("_") or key == "dn":
continue
if isinstance(value, list):
for v in value:
kvi = KeyValueIndex(uuid=data["_uuid"], key=key, value=v)
session.add(kvi)
else:
kvi = KeyValueIndex(uuid=data["_uuid"], key=key, value=value)
session.add(kvi)
# assemble search object
if data['_type'] in self.__search_aid['mapping']:
aid = self.__search_aid['mapping'][data['_type']]
attrs = self.__search_aid['attrs'][data['_type']] if data['_type'] in self.__search_aid['attrs'] else []
types = [data['_type']]
types.extend(data["_extensions"])
# append aliases to search words
for type in types[:]:
if type in self.__search_aid['aliases']:
types.extend(self.__search_aid['aliases'][type])
for ext in data["_extensions"]:
if ext in self.__search_aid['mapping']:
aid.update(self.__search_aid['mapping'][ext])
if ext in self.__search_aid['attrs']:
attrs.extend(self.__search_aid['attrs'][ext])
attrs = list(set(attrs))
search_words = [", ".join(data[x]) for x in attrs if x in data and data[x] is not None]
so = SearchObjectIndex(
so_uuid=data["_uuid"],
reverse_parent_dn=','.join([d for d in ldap.dn.explode_dn(data["_parent_dn"], flags=ldap.DN_FORMAT_LDAPV3)[::-1]]),
title=self.__build_value(aid["title"], data),
description=self.__build_value(aid["description"], data),
search=" ".join(search_words),
types=" ".join(list(set(types)))
)
session.add(so)
session.commit()
# update word index on change (if indexing is not running currently)
if not GlobalLock.exists("scan_index"):
self.update_words(session=session)
self.unmark_as_dirty(data["_uuid"])
except Exception as e:
self.log.error('Error during save: %s' % str(e))
def __build_value(self, v, info):
"""
Fill placeholders in the value to be displayed as "description".
"""
if not v:
return None
if v in info:
return ", ".join(info[v])
# Find all placeholders
attrs = {}
for attr in re.findall(r"%\(([^)]+)\)s", v):
# Extract ordinary attributes
if attr in info:
attrs[attr] = ", ".join(info[attr])
# Check for result renderers
elif attr in self.__value_extender:
attrs[attr] = self.__value_extender[attr](info)
# Fallback - just set nothing
else:
attrs[attr] = ""
# Assemble and remove empty lines and multiple whitespaces
res = v % attrs
res = re.sub(r"(<br>)+", "<br>", res)
res = re.sub(r"^<br>", "", res)
res = re.sub(r"<br>$", "", res)
return "<br>".join([s.strip() for s in res.split("<br>")])
def remove(self, obj, session=None):
self.remove_by_uuid(obj.uuid, session=session)
def __remove_others(self, uuids, session=None):
if session is not None:
return self.__session_remove_others(uuids, session)
else:
with make_session() as session:
return self.__session_remove_others(uuids, session)
def __session_remove_others(self, uuids, session):
self.log.debug("removing a bunch of objects")
session.query(KeyValueIndex).filter(~KeyValueIndex.uuid.in_(uuids)).delete(synchronize_session=False)
session.query(ExtensionIndex).filter(~ExtensionIndex.uuid.in_(uuids)).delete(synchronize_session=False)
session.query(SearchObjectIndex).filter(~SearchObjectIndex.so_uuid.in_(uuids)).delete(synchronize_session=False)
removed = session.query(ObjectInfoIndex).filter(~ObjectInfoIndex.uuid.in_(uuids)).delete(synchronize_session=False)
session.commit()
return removed
def remove_by_uuid(self, uuid, session=None):
if session is not None:
self.__remove_by_uuid(uuid, session)
else:
with make_session() as session:
self.__remove_by_uuid(uuid, session)
def __remove_by_uuid(self, uuid, session):
self.log.debug("removing object index for %s" % uuid)
if self.exists(uuid, session=session):
session.query(KeyValueIndex).filter(KeyValueIndex.uuid == uuid).delete()
session.query(ExtensionIndex).filter(ExtensionIndex.uuid == uuid).delete()
session.query(SearchObjectIndex).filter(SearchObjectIndex.so_uuid == uuid).delete()
session.query(ObjectInfoIndex).filter(ObjectInfoIndex.uuid == uuid).delete()
session.commit()
def update(self, obj, session=None):
if session is not None:
self.__update(obj, session)
else:
with make_session() as session:
self.__update(obj, session)
def __update(self, obj, session):
# Gather information
current = obj.asJSON(True, use_in_value=True)
old_dn = session.query(ObjectInfoIndex.dn).filter(ObjectInfoIndex.uuid == obj.uuid).one_or_none()
if not old_dn:
raise IndexException(C.make_error('OBJECT_NOT_FOUND', "base", id=obj.uuid))
old_dn = old_dn[0]
# Remove old entry and insert new
with self.lock:
self.remove_by_uuid(obj.uuid, session=session)
self.__save(current, session=session)
# Has the entry been moved?
if current['dn'] != old_dn:
# Adjust all ParentDN entries of child objects
res = session.query(ObjectInfoIndex).filter(
or_(ObjectInfoIndex._parent_dn == old_dn, ObjectInfoIndex._parent_dn.like('%' + old_dn))
).all()
for entry in res:
o_uuid = entry.uuid
o_dn = entry.dn
o_parent = entry._parent_dn
o_adjusted_parent = entry._adjusted_parent_dn
n_dn = o_dn[:-len(old_dn)] + current['dn']
n_parent = o_parent[:-len(old_dn)] + current['dn']
n_adjusted_parent = o_adjusted_parent[:-len(o_adjusted_parent)] + current['_adjusted_parent_dn']
oi = session.query(ObjectInfoIndex).filter(ObjectInfoIndex.uuid == o_uuid).one()
oi.dn = n_dn
oi._parent_dn = n_parent
oi._adjusted_parent_dn = n_adjusted_parent
session.commit()
@Command(__help__=N_("Check if an object with the given UUID exists."))
def exists(self, uuid, session=None):
"""
Do a database query for the given UUID and return an
existence flag.
========== ==================
Parameter Description
========== ==================
uuid Object identifier
========== ==================
``Return``: True/False
"""
if session is not None:
return session.query(ObjectInfoIndex.uuid).filter(ObjectInfoIndex.uuid == uuid).one_or_none() is not None
else:
with make_session() as session:
return session.query(ObjectInfoIndex.uuid).filter(ObjectInfoIndex.uuid == uuid).one_or_none() is not None
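# Usage sketch (illustrative, not part of the original module; the UUID below is
# a made-up placeholder):
#
#     index = PluginRegistry.getInstance("ObjectIndex")
#     index.exists("7f2b0c3e-made-up-uuid")                  # -> True / False
#
#     # or reuse an already open session instead of opening a new one per call
#     with make_session() as session:
#         known = index.exists(obj.uuid, session=session)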
@Command(__help__=N_("Get list of defined base object types."))
def getBaseObjectTypes(self):
ret = []
for k, v in self.factory.getObjectTypes().items():
if v['base']:
ret.append(k)
return ret
@Command(needsUser=True, __help__=N_("Query the index for entries."))
def find(self, user, query, conditions=None):
"""
Perform a raw sqlalchemy query.
========== ==================
Parameter Description
========== ==================
query Query hash
conditions Conditions hash
========== ==================
For more information on the query format, consult the mongodb documentation.
``Return``: List of dicts
"""
res = []
# Always return dn and _type - we need it for ACL control
if isinstance(conditions, dict):
conditions['dn'] = 1
conditions['type'] = 1
else:
conditions = None
if not isinstance(query, dict):
raise FilterException(C.make_error('INVALID_QUERY'))
# Create result-set
for item in self.search(query, conditions):
# Filter out what the current user is not allowed to see
item = self.__filter_entry(user, item)
if item and item['dn'] is not None:
res.append(item)
return res
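# Query sketch (illustrative, not part of the original module; the attribute
# names and values below are made-up placeholders). As implemented by
# _make_filter further down, plain keys match ObjectInfoIndex columns,
# "extension" matches the extension index, any other key is looked up in the
# key/value index, "%" acts as a wildcard, and "and_"/"or_"/"not_" plus
# "in_"/"not_in_" act as operators:
#
#     index.find(user,
#                {"or_": {"_type": "User", "extension": {"in_": ["PosixUser"]}},
#                 "uid": "jdoe%"},
#                {"dn": 1, "uid": 1})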
def _make_filter(self, node, session):
use_extension = False
def __make_filter(n, session):
nonlocal use_extension
res = []
for key, value in n.items():
if isinstance(value, dict):
# Maintain certain key words
if key == "and_":
res.append(and_(*__make_filter(value, session)))
elif key == "or_":
res.append(or_(*__make_filter(value, session)))
elif key == "not_":
res.append(not_(*__make_filter(value, session)))
elif 'not_in_' in value or 'in_' in value:
if key == "extension":
use_extension = True
if 'not_in_' in value:
res.append(~ExtensionIndex.extension.in_(value['not_in_']))
elif 'in_' in value:
res.append(ExtensionIndex.extension.in_(value['in_']))
elif hasattr(ObjectInfoIndex, key):
attr = getattr(ObjectInfoIndex, key)
if 'not_in_' in value:
res.append(~attr.in_(value['not_in_']))
elif 'in_' in value:
res.append(attr.in_(value['in_']))
else:
in_expr = None
if 'not_in_' in value:
in_expr = ~KeyValueIndex.value.in_(value['not_in_'])
elif 'in_' in value:
in_expr = KeyValueIndex.value.in_(value['in_'])
sub_query = session.query(KeyValueIndex.uuid).filter(KeyValueIndex.key == key, in_expr).subquery()
res.append(ObjectInfoIndex.uuid.in_(sub_query))
else:
raise IndexException(C.make_error('NOT_SUPPORTED', "base", operator=key))
elif isinstance(value, list):
# implicit or_ in case of lists - hashes cannot have multiple
# keys with the same name
exprs = []
for v in value:
# convert integers because we need strings
if isinstance(v, int):
v = "%s" % v
if hasattr(ObjectInfoIndex, key):
if "%" in v:
if v == "%":
exprs.append(getattr(ObjectInfoIndex, key).like(v))
else:
exprs.append(getattr(ObjectInfoIndex, key).ilike(v))
else:
exprs.append(getattr(ObjectInfoIndex, key) == v)
elif key == "extension":
use_extension = True
exprs.append(ExtensionIndex.extension == v)
else:
if key == "*":
sub_query = search(session.query(SearchObjectIndex.so_uuid), v, sort=True, regconfig='simple').subquery()
elif "%" in v:
if v == "%":
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value.like(v))). \
subquery()
else:
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value.ilike(v))). \
subquery()
else:
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value == v)). \
subquery()
res.append(ObjectInfoIndex.uuid.in_(sub_query))
res.append(or_(*exprs))
else:
# convert integers because we need strings
if isinstance(value, int):
value = "%s" % value
if hasattr(ObjectInfoIndex, key):
if "%" in value:
res.append(getattr(ObjectInfoIndex, key).ilike(value))
else:
res.append(getattr(ObjectInfoIndex, key) == value)
elif key == "extension":
use_extension = True
res.append(ExtensionIndex.extension == value)
else:
if key == "*":
sub_query = search(session.query(SearchObjectIndex.so_uuid), value, sort=True, regconfig='simple').subquery()
elif "%" in value:
if value == "%":
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value.like(value))). \
subquery()
else:
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value.ilike(value))). \
subquery()
else:
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value == value)). \
subquery()
res.append(ObjectInfoIndex.uuid.in_(sub_query))
return res
# Add query information to be able to search various tables
_args = __make_filter(node, session)
if use_extension:
args = [ObjectInfoIndex.uuid == ExtensionIndex.uuid]
args += _args
return and_(*args)
return _args
def get_extensions(self, uuid):
""" return the list of active extensions for the given uuid-object as store in the db """
with make_session() as session:
q = session.query(ExtensionIndex).filter(ExtensionIndex.uuid == uuid)
return [e.extension for e in q.all()]
def search(self, query, properties, options=None, session=None):
"""
Perform an index search
========== ==================
Parameter Description
========== ==================
query Query hash
properties Conditions hash
========== ==================
For more information on the query format, consult the mongodb documentation.
``Return``: List of dicts
"""
if session is None:
with make_session() as session:
return self._session_search(session, query, properties, options=options)
else:
return self._session_search(session, query, properties, options=options)
def _session_search(self, session, query, properties, options=None):
res = []
fltr = self._make_filter(query, session)
def normalize(data, resultset=None, so_props=None):
_res = {
"_uuid": data.uuid,
"dn": data.dn,
"_type": data._type,
"_parent_dn": data._parent_dn,
"_adjusted_parent_dn": data._adjusted_parent_dn,
"_last_changed": data._last_modified,
"_extensions": []
}
# Add extension list
for extension in data.extensions:
_res["_extensions"].append(extension.extension)
# Add indexed properties
for kv in data.properties:
if kv.key in _res:
_res[kv.key].append(kv.value)
else:
_res[kv.key] = [kv.value]
# get data from SearchObjectIndex (e.g. title, description)
if so_props is not None and len(so_props) > 0 and len(data.search_object) > 0:
for prop in so_props:
_res[prop] = [getattr(data.search_object[0], prop)]
# Clean the result set?
if resultset:
for key in [_key for _key in _res if _key not in resultset and _key[0:1] != "_"]:
_res.pop(key, None)
return _res
if options is None:
options = {}
q = session.query(ObjectInfoIndex) \
.options(subqueryload(ObjectInfoIndex.properties)) \
.options(subqueryload(ObjectInfoIndex.extensions))
# check if we need something from the searchObject
so_props = None
if properties is not None:
so_props = [x for x in properties if hasattr(SearchObjectIndex, x)]
if len(so_props) > 0:
q = q.options(subqueryload(ObjectInfoIndex.search_object))
q = q.filter(*fltr)
if 'limit' in options:
# Query.limit() returns a new query object, so keep the result
q = q.limit(options['limit'])
# self.log.info(print_query(q))
try:
for o in q.all():
res.append(normalize(o, properties, so_props=so_props))
except sqlalchemy.exc.InternalError as e:
self.log.error(str(e))
session.rollback()
return res
def __filter_entry(self, user, entry):
"""
Takes a query entry and decides based on the user what to do
with the result set.
========== ===========================
Parameter Description
========== ===========================
user User ID
entry Search entry as hash
========== ===========================
``Return``: Filtered result entry
"""
if self._acl_resolver.isAdmin(user):
return entry
res = {}
for attr in entry.keys():
if attr in ['dn', '_type', '_uuid', '_last_changed']:
res[attr] = entry[attr]
continue
if self.__has_access_to(user, entry['dn'], entry['_type'], attr):
res[attr] = entry[attr]
return res
def __has_access_to(self, user, object_dn, object_type, attr):
"""
Checks whether the given user has access to the given object/attribute or not.
"""
if user:
topic = "%s.objects.%s.attributes.%s" % (self.env.domain, object_type, attr)
return self._acl_resolver.check(user, topic, "r", base=object_dn)
else:
return True
# needs to be top level to be picklable
def process_objects(o):
res = None
index = PluginRegistry.getInstance("ObjectIndex")
with make_session() as inner_session:
if o is None:
return None, None, ObjectIndex.to_be_updated
# Get object
try:
obj = ObjectProxy(o)
except Exception as e:
index.log.warning("not indexing %s: %s" % (o, str(e)))
return res, None, ObjectIndex.to_be_updated
# Check for index entry
last_modified = inner_session.query(ObjectInfoIndex._last_modified).filter(ObjectInfoIndex.uuid == obj.uuid).one_or_none()
# Entry is not in the database
if not last_modified:
index.insert(obj, True, session=inner_session)
res = "added"
# Entry is in the database
else:
# OK: already there
if obj.modifyTimestamp == last_modified[0]:
index.log.debug("found up-to-date object index for %s (%s)" % (obj.uuid, obj.dn))
res = "existing"
else:
index.log.debug("updating object index for %s (%s)" % (obj.uuid, obj.dn))
index.update(obj, session=inner_session)
res = "updated"
uuid = obj.uuid
del obj
return res, uuid, ObjectIndex.to_be_updated
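# Note (illustrative, not part of the original module): the worker functions in
# this section are kept at module level because multiprocessing pickles the
# callable it sends to the pool; a nested function or lambda cannot be pickled
# with the default start method, e.g.:
#
#     from multiprocessing import Pool
#     with Pool(2) as pool:
#         pool.map(process_objects, [None])     # works: top-level function
#         pool.map(lambda o: o, [None])         # raises PicklingError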
def post_process(uuid):
index = PluginRegistry.getInstance("ObjectIndex")
with make_session() as inner_session:
if uuid:
try:
obj = ObjectProxy(uuid)
index.update(obj, session=inner_session)
return True
except Exception as e:
index.log.warning("not post-processing %s: %s" % (uuid, str(e)))
traceback.print_exc()
return False
return False
def resolve_children(dn):
index = PluginRegistry.getInstance("ObjectIndex")
index.log.debug("found object '%s'" % dn)
res = {}
children = index.factory.getObjectChildren(dn)
res = {**res, **children}
for chld in children.keys():
res = {**res, **resolve_children(chld)}
return res
@implementer(IInterfaceHandler)
class BackendRegistry(Plugin):
_target_ = 'core'
_priority_ = 99
def __init__(self):
self.env = Environment.getInstance()
self.log = logging.getLogger(__name__)
@Command(__help__=N_("Register a backend to allow MQTT access"))
def registerBackend(self, uuid, password, url=None, type=BackendTypes.unknown):
with make_session() as session:
query = session.query(RegisteredBackend).filter(or_(RegisteredBackend.uuid == uuid,
RegisteredBackend.url == url))
if query.count() > 0:
# delete old entries
query.delete()
rb = RegisteredBackend(
uuid=uuid,
password=password,
url=url,
type=type
)
session.add(rb)
session.commit()
@Command(__help__=N_("Unregister a backend from MQTT access"))
def unregisterBackend(self, uuid):
with make_session() as session:
backend = session.query(RegisteredBackend).filter(RegisteredBackend.uuid == uuid).one_or_none()
if backend is not None:
session.delete(backend)
session.commit()
def check_auth(self, uuid, password):
if hasattr(self.env, "core_uuid") and self.env.core_uuid == uuid and self.env.core_key == password:
return True
with make_session() as session:
backend = session.query(RegisteredBackend).filter(RegisteredBackend.uuid == uuid).one_or_none()
if backend is not None:
return backend.validate_password(password)
return False
def get_type(self, uuid):
# do not use DB if we want to identify ourselves
if hasattr(self.env, "core_uuid") and self.env.core_uuid == uuid:
return BackendTypes.proxy if self.env.mode == "proxy" else BackendTypes.active_master
with make_session() as session:
try:
res = session.query(RegisteredBackend.type).filter(RegisteredBackend.uuid == uuid).one_or_none()
return res[0] if res is not None else None
except Exception as e:
self.log.error('Error querying backend type from db: %s' % str(e))
return None
|
lgpl-2.1
| -7,332,828,401,185,501,000
| 39.265225
| 240
| 0.534066
| false
| 4.380829
| false
| false
| false
|
amwelch/ifpy
|
ifpy/parse.py
|
1
|
2325
|
import dpkt
import humanfriendly
import nids
import sys
import pandas as pd
import socket
ips = {}
ip_to_domain = {}
def handle_tcp_stream(tcp):
end_states = (nids.NIDS_CLOSE, nids.NIDS_TIMEOUT, nids.NIDS_RESET)
ports = [80, 443]
if tcp.addr[1][1] not in ports:
return
global ips
if tcp.nids_state == nids.NIDS_JUST_EST:
tcp.client.collect = 1
tcp.server.collect = 1
elif tcp.nids_state == nids.NIDS_DATA:
tcp.discard(0)
elif tcp.nids_state in end_states:
ip = tcp.addr[1][0]
ips.setdefault(ip, 0)
ips[ip] += len(tcp.client.data[:tcp.client.count]) + len(tcp.server.data[:tcp.server.count])
def udp_callback(addrs, payload, pkt):
if addrs[0][1] != 53:
return
dns = dpkt.dns.DNS(payload)
global ip_to_domain
for q in dns.qd:
for a in dns.an:
try:
ip = socket.inet_ntoa(a.ip)
ip_to_domain[ip] = a.name
except AttributeError:
pass
return
def extract(pcap_file):
global ip_to_domain
global ips
ips = {}
ip_to_domain = {}
nids.param("tcp_workarounds", 1)
nids.param("scan_num_hosts", 0) # disable portscan detection
nids.chksum_ctl([('0.0.0.0/0', False)]) # disable checksumming
nids.param("filename", pcap_file)
nids.init()
nids.register_tcp(handle_tcp_stream)
nids.register_udp(udp_callback)
try:
nids.run()
except Exception, e:
print "Exception ", pcap_file + " ", e
return
data = []
columns = ('name', 'bytes')
for ip, byte in ips.iteritems():
name = ip_to_domain.get(ip)
if name is None:
try:
name, alias, addresslist = socket.gethostbyaddr(ip)
name += ' (rDNS)'
except socket.herror as e:
name = ip
data.append([str(name), byte])
df = pd.DataFrame(data, columns=columns)
df = df.groupby('name', as_index=False).sum()
df = df.sort('bytes', ascending=False)
df['human_bytes'] = df.apply(lambda row: humanfriendly.format_size(row['bytes']), axis=1)
return df
if __name__ == "__main__":
for f in sys.argv[1:]:
print f
df = extract(f)
if df is not None:
print df.head(10)
|
mit
| -3,545,299,151,492,854,000
| 26.352941
| 100
| 0.564301
| false
| 3.274648
| false
| false
| false
|
yahoo/bossmashup
|
templates/publisher.py
|
1
|
3761
|
#Copyright (c) 2011 Yahoo! Inc. All rights reserved. Licensed under the BSD License.
# See accompanying LICENSE file or http://www.opensource.org/licenses/BSD-3-Clause for the specific language governing permissions and limitations under the License.
"""
Main class here is Serp (Search Engine Results Page)
This is a simple templating library for binding search results with html templates
Check out the california dir to see how templates are formatted and feel free to use it as a model for your own
Look at examples/ex1 in the root directory to see how to use Serp
If you're looking for a more powerful templating library, try clearsilver
"""
__author__ = "BOSS Team"
from collections import defaultdict
from os.path import abspath
from util import console
from yos.yql.db import strip_prep
def serp(tr, title, endpoint, results):
html = open(tr + "/page/page.html", "r").read()
ht = tr + "/page/page.css"
at = tr + "/result/result.css"
html = html.replace("<?header_background_img_dir?>", tr + "/page/", 1)
html = html.replace("<?header_css?>", ht, 1)
html = html.replace("<?header_abstract_css?>", at, 1)
html = html.replace("<?header_title?>", title, 1)
html = html.replace("<?header_endpoint?>", endpoint, 1)
return html.replace("<?header_results?>", "".join(results), 1)
def set_result(html, url, title, abstract, dispurl, source, imageurl):
html = html.replace("<?result_imageurl?>", imageurl, 1)
html = html.replace("<?result_source?>", source, 1)
html = html.replace("<?result_clickurl?>", url, 1)
html = html.replace("<?result_title?>", title, 1)
html = html.replace("<?result_abstract?>", abstract, 1)
return html.replace("<?result_dispurl?>", dispurl, 1)
def scratch_result(template, url, title, abstract="", dispurl="", source="", imageurl=""):
html = open(template, "r").read()
return set_result(html, url, title, abstract, dispurl, source, imageurl)
def prepare_row(row):
""" Just removes namespacing in the field names """
nr = defaultdict(lambda: "")
existing = map(lambda item: (strip_prep(item[0]), item[1]), row.iteritems())
nr.update(existing)
return nr
class Serp:
def __init__(self, template_dir, title, endpoint, result_template="result_default.html", maker=set_result):
"""
template_dir specifies which template directory to use e.g. 'templates/california' that is provided
title is the title of the search results html webpage
result_template is an optional parameter to specifying another search result template
maker is a function that follows the result template design to bind html e.g. set_result sets <?result_title?>
"""
self._tr = abspath(template_dir.rstrip("/"))
self._title = title
self._endpoint = endpoint
self._html = open(self._tr + "/result/" + result_template, "r").read()
self.results = []
self._maker = maker
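# Illustrative sketch (not part of the original module): a custom maker takes the
# same positional arguments as set_result and returns the bound HTML snippet; the
# "[img]" marker below is a made-up example.
#
#     def my_maker(html, url, title, abstract, dispurl, source, imageurl):
#         if imageurl:
#             title = "[img] " + title
#         return set_result(html, url, title, abstract, dispurl, source, imageurl)
#
#     page = Serp("templates/california", "Results", "/search", maker=my_maker)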
def add(self, url, title, abstract="", dispurl="", source="", imageurl=""):
self.results.append( self._maker(self._html, url, title, abstract, dispurl, source, imageurl) )
def _bind_row(self, row):
nr = prepare_row(row)
return self.add(nr["clickurl"], nr["title"], nr["abstract"], nr["dispurl"], nr["source"], nr["imageurl"])
def bind_table(self, table):
"""
If the table contains rows (dictionaries) which have the fields referenced in _bind_row,
then just pass the table here and forget doing a loop around the add call
"""
for row in table.rows:
self._bind_row(row)
def dumps(self):
""" Return resulting html as a string """
return console.strfix(serp(self._tr, self._title, self._endpoint, results=self.results))
def dump(self, f):
""" Save resulting html as a file named f """
o = open(f, "w")
o.write(self.dumps())
o.close()
|
bsd-3-clause
| 4,224,145,653,442,078,700
| 41.258427
| 165
| 0.683595
| false
| 3.568311
| false
| false
| false
|
ainur-fa/python_training_1
|
test/test_del_contact_in_group.py
|
1
|
1061
|
# -*- coding: utf-8 -*-
from model.contact import Contact
from model.group import Group
import random
def test_del_contact_in_group(app, db, orm):
list = []
app.contact.check_available_min_requirement(app, db)
group_list = db.get_group_list()
for this_group in group_list:
contacts_in_group = orm.get_contacts_in_group(Group(id=this_group.id))
[list.append(elem) for elem in contacts_in_group if elem not in list]
if list == []:
group = random.choice(db.get_group_list())
contact = random.choice(db.get_contact_list())
app.contact.add_contact_to_group(contact.id, group.id)
list.append(orm.get_contact_list()[0])
contact = random.choice(list)
group = random.choice(orm.get_group_where_contact(Contact(id=contact.id)))
old_contacts = orm.get_contacts_in_group(group)
app.contact.del_contact_in_group(contact.id, group.id)
new_contacts = orm.get_contacts_in_group(group)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
|
apache-2.0
| 7,627,593,699,910,538,000
| 41.44
| 101
| 0.681433
| false
| 3.167164
| false
| false
| false
|
francisbrochu/microbiome-summer-school-2017_mass-spec
|
example/tutorial_code/alignment.py
|
1
|
3212
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from .spectrum import Spectrum
from .spectrum_utils import take_closest, binary_search_mz_values
from subprocess import call
from os.path import join
from os import remove
class Mass_Spectra_Aligner():
def __init__(self, window_size=10):
self.window_size = window_size
self.reference_mz = []
def fit(self, spectra):
self._train(spectra)
def _train(self, spectra):
"""
Fill the reference_mz attribute with possible m/z values.
:param spectra: A set of Spectrum objects.
:return: Nothing
"""
path = "tutorial_code/cpp_extensions"
self._write_mz_values_to_file(spectra, path)
call([str(join(path, "alignment")),
"temp_spectra.csv",
str(self.window_size)])
self.reference_mz = self._read_reference_from_file(path)
def transform(self, spectra):
new_spectra = []
for i, s in enumerate(spectra):
new_spectra.append(self._apply(s))
return np.asarray(new_spectra)
def _apply(self, spec):
# Find closest point that is not outside possible window
# If point: change mz
# Else: keep or discard m/z?
aligned_mz = []
aligned_int = []
nf_mz = []
nf_int = []
for i, mz in enumerate(spec.mz_values):
possible_matches = []
try:
possible_matches = binary_search_mz_values(self.reference_mz, mz,
float(self.window_size))
except ValueError:
nf_mz.append(mz)
nf_int.append(spec.intensity_values[i])
continue
if (len(possible_matches) > 1):
possible_matches = [take_closest(possible_matches, mz)]
if (len(possible_matches) == 1):
aligned_mz.append(possible_matches[0])
aligned_int.append(spec.intensity_values[i])
else:
aligned_mz.append(mz)
aligned_int.append(spec.intensity_values[i])
nf_mz.append(mz)
nf_int.append(spec.intensity_values[i])
return Spectrum(np.asarray(aligned_mz), np.asarray(aligned_int),
spec.mz_precision, spec.metadata)
def _write_mz_values_to_file(self, spectra, path):
filename = "temp_spectra.csv"
f = open(filename,"w")
for s in spectra:
line = ""
for mz in s.mz_values:
line += str(mz)
line += ","
line = line[:-1]
line += "\n"
f.write(line)
f.close()
def _read_reference_from_file(self, path):
filename = "alignmentPoints.txt"
f = open(filename,"r")
line = f.readline().strip().split(" ")
mz_values = []
for mz in line:
mz_values.append(round(float(mz),4))
#clear temporary files
#remove("temp_spectra.csv")
#remove(filename)
return np.asarray(mz_values)
|
mit
| -4,486,410,587,900,100,000
| 30.194175
| 83
| 0.540473
| false
| 3.902795
| false
| false
| false
|
google-research/rigl
|
rigl/experimental/jax/pruning/init_test.py
|
1
|
8125
|
# coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.pruning.init."""
from typing import Any, Mapping, Optional
from absl.testing import absltest
import flax
import jax
import jax.numpy as jnp
from rigl.experimental.jax.pruning import init
from rigl.experimental.jax.pruning import masked
class MaskedDense(flax.nn.Module):
"""Single-layer Dense Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
mask = None):
inputs = inputs.reshape(inputs.shape[0], -1)
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.nn.Dense,
mask=layer_mask,
kernel_init=flax.nn.initializers.kaiming_normal())
class MaskedDenseSparseInit(flax.nn.Module):
"""Single-layer Dense Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
*args,
mask = None,
**kwargs):
inputs = inputs.reshape(inputs.shape[0], -1)
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.nn.Dense,
mask=layer_mask,
kernel_init=init.kaiming_sparse_normal(
layer_mask['kernel'] if layer_mask is not None else None),
**kwargs)
class MaskedCNN(flax.nn.Module):
"""Single-layer CNN Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
mask = None):
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.nn.Conv,
kernel_size=(3, 3),
mask=layer_mask,
kernel_init=flax.nn.initializers.kaiming_normal())
class MaskedCNNSparseInit(flax.nn.Module):
"""Single-layer CNN Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
*args,
mask = None,
**kwargs):
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.nn.Conv,
kernel_size=(3, 3),
mask=layer_mask,
kernel_init=init.kaiming_sparse_normal(
layer_mask['kernel'] if layer_mask is not None else None),
**kwargs)
class InitTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._rng = jax.random.PRNGKey(42)
self._batch_size = 2
self._input_shape = ((self._batch_size, 28, 28, 1), jnp.float32)
self._input = jnp.ones(*self._input_shape)
def test_init_kaiming_sparse_normal_output(self):
"""Tests the output shape/type of kaiming normal sparse initialization."""
input_array = jnp.ones((64, 16), jnp.float32)
mask = jax.random.bernoulli(self._rng, shape=(64, 16))
base_init = flax.nn.initializers.kaiming_normal()(self._rng,
input_array.shape,
input_array.dtype)
sparse_init = init.kaiming_sparse_normal(mask)(self._rng, input_array.shape,
input_array.dtype)
with self.subTest(name='test_sparse_init_output_shape'):
self.assertSequenceEqual(sparse_init.shape, base_init.shape)
with self.subTest(name='test_sparse_init_output_dtype'):
self.assertEqual(sparse_init.dtype, base_init.dtype)
with self.subTest(name='test_sparse_init_output_notallzero'):
self.assertTrue((sparse_init != 0).any())
def test_dense_no_mask(self):
"""Checks that in the special case of no mask, init is same as base_init."""
_, initial_params = MaskedDense.init_by_shape(self._rng,
(self._input_shape,))
self._unmasked_model = flax.nn.Model(MaskedDense, initial_params)
_, initial_params = MaskedDenseSparseInit.init_by_shape(
jax.random.PRNGKey(42), (self._input_shape,), mask=None)
self._masked_model_sparse_init = flax.nn.Model(MaskedDenseSparseInit,
initial_params)
self.assertTrue(
jnp.isclose(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'], self._unmasked_model.params['MaskedModule_0']
['unmasked']['kernel']).all())
def test_dense_sparse_init_kaiming(self):
"""Checks kaiming normal sparse initialization for dense layer."""
_, initial_params = MaskedDense.init_by_shape(self._rng,
(self._input_shape,))
self._unmasked_model = flax.nn.Model(MaskedDense, initial_params)
mask = masked.simple_mask(self._unmasked_model, jnp.ones,
masked.WEIGHT_PARAM_NAMES)
_, initial_params = MaskedDenseSparseInit.init_by_shape(
jax.random.PRNGKey(42), (self._input_shape,), mask=mask)
self._masked_model_sparse_init = flax.nn.Model(MaskedDenseSparseInit,
initial_params)
mean_init = jnp.mean(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
stddev_init = jnp.std(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
mean_sparse_init = jnp.mean(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
stddev_sparse_init = jnp.std(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
with self.subTest(name='test_cnn_sparse_init_mean'):
self.assertBetween(mean_sparse_init, mean_init - 2 * stddev_init,
mean_init + 2 * stddev_init)
with self.subTest(name='test_cnn_sparse_init_stddev'):
self.assertBetween(stddev_sparse_init, 0.5 * stddev_init,
1.5 * stddev_init)
def test_cnn_sparse_init_kaiming(self):
"""Checks kaiming normal sparse initialization for convolutional layer."""
_, initial_params = MaskedCNN.init_by_shape(self._rng, (self._input_shape,))
self._unmasked_model = flax.nn.Model(MaskedCNN, initial_params)
mask = masked.simple_mask(self._unmasked_model, jnp.ones,
masked.WEIGHT_PARAM_NAMES)
_, initial_params = MaskedCNNSparseInit.init_by_shape(
jax.random.PRNGKey(42), (self._input_shape,), mask=mask)
self._masked_model_sparse_init = flax.nn.Model(MaskedCNNSparseInit,
initial_params)
mean_init = jnp.mean(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
stddev_init = jnp.std(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
mean_sparse_init = jnp.mean(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
stddev_sparse_init = jnp.std(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
with self.subTest(name='test_cnn_sparse_init_mean'):
self.assertBetween(mean_sparse_init, mean_init - 2 * stddev_init,
mean_init + 2 * stddev_init)
with self.subTest(name='test_cnn_sparse_init_stddev'):
self.assertBetween(stddev_sparse_init, 0.5 * stddev_init,
1.5 * stddev_init)
if __name__ == '__main__':
absltest.main()
|
apache-2.0
| -4,098,234,479,641,906,000
| 34.021552
| 80
| 0.617231
| false
| 3.609507
| true
| false
| false
|
ayosec/pyslide
|
Pyslide/Presentation/Pages.py
|
1
|
9277
|
# -*- coding: latin1 -*-
#
# Copyright (C) 2003, 2004 Ayose Cazorla León
#
# Authors
# Ayose Cazorla <ayose.cazorla@hispalinux.es>
#
# This file is part of Pyslide.
#
# Pyslide is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Pyslide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pyslide; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import pygame
def renderbackground(surface, background):
'''renderbackground(surface, background)
Renders background on surface. background can be:
* a pygame.Surface object (use blit)
* a tuple, interpreted as a color (use fill)
* None.. do nothing
'''
if isinstance(background, tuple):
surface.fill(background)
elif isinstance(background, pygame.Surface):
surface.blit(background, (0,0))
elif background is not None:
raise TypeError, 'background has to be Surface, tuple or None'
def createbackground(attrs):
'''createbackground(attrs) -> background
Create a background for this attributes. "background" can be:
* a pygame.Surface object
* a tuple: this is a color used to fill the background
* None: there is no background for this attributes.
'''
from Pyslide import misc
from Pyslide.Presentation import CurrentSurface
size = CurrentSurface.width, CurrentSurface.height
if attrs.has_key('bggrad'):
try:
top, bottom = attrs['bggrad'].split('-', 1)
except ValueError:
raise misc.PyslideError, 'Invalid value for bggrad: ' + attrs['bggrad']
top = misc.parse_color(top)
bottom = misc.parse_color(bottom)
if None in (top, bottom):
raise misc.PyslideError, 'Invalid gradient value: ' + attrs['bggrad']
bg = pygame.Surface(size)
grad = misc.RenderGradient(bg, top, bottom)
pygame.surfarray.blit_array(bg, grad)
elif attrs.has_key('bg'):
scale = attrs.get('bgscale', 'yes') == 'yes'
from Pyslide.Main.Images import imageloader
bg = imageloader.loadfile(attrs['bg'])
if scale:
bg = pygame.transform.scale(bg, size).convert()
else:
s = bg.get_size()
if (s[0] < size[0]) or (s[1] < size[1]):
i = pygame.Surface(size).convert()
i.fill(0)
i.blit(bg, (0,0))
bg = i
else:
bg = bg.convert()
elif attrs.has_key('bgcolor'):
bg = misc.parse_color(attrs['bgcolor'])
if bg is None:
raise misc.PyslideError, 'Invalid color: ' + attrs['bgcolor']
else:
bg = None
return bg
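# Illustrative sketch (not part of the original module): attribute dictionaries
# handled above; the colour and file values are made up, and the exact colour
# syntax is whatever misc.parse_color accepts.
#
#     createbackground({'bggrad': 'black-blue'})             # vertical gradient
#     createbackground({'bg': 'img/bg.png', 'bgscale': 'no'})
#     createbackground({'bgcolor': 'white'})
#     createbackground({})                                    # -> None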
def applycss(item, group):
'''applycss(item, group) -> item
Get the effective attributes for the item. Returns a new item dict with the resolved type, content and attributes
'''
from Pyslide.misc import PyslideError
parentobj = group.parent().parent()
# its own attributes
newattrs = item['attrs'].copy()
itemtype = item['type']
# class attributes
if newattrs.has_key('class'):
c = newattrs['class']
del newattrs['class']
try:
classattrs = parentobj.cssclass[c].items()
except KeyError:
raise PyslideError, 'Unknown class: ' + c
for key, val in classattrs:
if not newattrs.has_key(key):
newattrs[key] = val
# alias attributes
if parentobj.cssalias.has_key(itemtype):
alias = parentobj.cssalias[itemtype]
if itemtype not in ('text', 'image', 'system', 'shape', 'list'):
try:
itemtype = alias['item-type']
except:
raise PyslideError, \
'Invalid alias "%s": item-type attribute not present' % itemtype
for key, val in alias.items():
if not newattrs.has_key(key):
newattrs[key] = val
# group attributes
for key, val in group.attrs.items():
if not newattrs.has_key(key):
newattrs[key] = val
# remove for- attributes if necessary
posgroup = group.getposition(item) + 1
for key in newattrs.keys():
if key.startswith('for-'):
place = key.split('-')[1]
put = False
try:
# is it a number?
put = (int(place) == posgroup)
except ValueError:
place = place.lower()
v = ['first', 'second', 'third']
if place in v:
put = (v.index(place) + 1) == posgroup
elif (place == 'even' and (posgroup % 2) == 0) \
or (place == 'odd' and (posgroup % 2) == 1):
put = True
elif place == 'last':
put = group.isthelast(item)
if put:
k = '-'.join(key.split('-')[2:])
if not newattrs.has_key(k):
newattrs[k] = newattrs[key]
del newattrs[key]
# THE item!
return {'type': itemtype, 'content': item['content'], 'attrs': newattrs}
class CreatedPage:
def __init__(self, attrs, groups):
self.groups = groups
self.attrs = attrs
self.stage = 0
self.__bg = None
if attrs.has_key('ttl'):
try:
self.ttl = int(attrs['ttl']) / 10.
except ValueError:
raise PyslideError, 'Invalid TTL value: ' + str(attrs['ttl'])
else:
self.ttl = None
def currentgroups(self):
return self.groups[:self.stage+1]
def getcurrentgroup(self):
return self.groups[self.stage]
def nextstage(self):
if self.stage < len(self.groups) - 1:
self.stage += 1
return self.groups[self.stage]
return None
def prevstage(self):
if self.stage > 0:
self.stage -= 1
return self.groups[self.stage + 1]
return None
def setstage(self, n):
if n < 0:
self.stage = len(self.groups) + n
else:
self.stage = n
def getbackground(self):
if self.__bg is None:
self.__bg = createbackground(self.attrs)
return self.__bg
def iskeepalways(item, group):
'''iskeepalways(item, group) -> bool
Returns True if item is a keep-always item
'''
def i():
yield item['attrs']
yield group.attrs
p = group.parent().parent()
if p.cssalias.has_key(item['type']):
yield p.cssalias[item['type']]
if item['attrs'].has_key('class'):
c = item['attrs']['class']
if p.cssclass.has_key(c):
yield p.cssclass[c]
for attrs in i():
if attrs.has_key('keep-always'):
return attrs['keep-always'] == 'yes'
return False
class Page:
def __init__ (self, page):
self.page = page
def getkeepalwaysitems(self):
# we have to create all the previous items to
# the keep-always items, because that items may
# need the LastPoint info.
# First, create a flat list of items
from copy import copy as C
items = []
for g in self.page.groups():
x = C(g.items())
for i in x: i['parent-group'] = g
items += x
# find the last keep-always item
last = -1
keepalwaysitems = []
for n, i in enumerate(items):
if iskeepalways(i, i['parent-group']):
last = n
keepalwaysitems.append(i)
from Pyslide import Items
result = []
lp = Items.LastPoint()
if last >= 0:
for item in items[:last+1]:
i = self.createitem(item, item['parent-group'], lp)
if item in keepalwaysitems:
result.append(i)
return result
def createitem(origitem, group, lp):
from Pyslide import Items
from copy import copy as C
item = applycss(origitem, group)
try:
itemtype = Items.getitemtype(item['type'])
except KeyError:
from Pyslide import misc
raise misc.PyslideError, 'invalid item: ' + item['type']
try:
i = itemtype((origitem, group), C(item['content']), C(item['attrs']), lp)
except Items.ReplaceThisItem, (t,):
i = t
return i
createitem = staticmethod(createitem)
def createpage(self):
from Pyslide import Items
groups = []
lp = Items.LastPoint()
for group in self.page.groups():
groups.append([self.createitem(i, group, lp) for i in group.items()])
return CreatedPage(self.page.get_attrs(), groups)
|
gpl-2.0
| -1,627,606,097,163,363,300
| 27.283537
| 85
| 0.556214
| false
| 3.990108
| false
| false
| false
|
FrankNagel/qlc
|
src/webapp/quanthistling/scripts/annotations/annotations_for_jakway2008.py
|
1
|
8208
|
# -*- coding: utf8 -*-
import sys, os
sys.path.append(os.path.abspath('.'))
import re
from operator import attrgetter
import difflib
# Pylons model init sequence
import pylons.test
import logging
from quanthistling.config.environment import load_environment
from quanthistling.model.meta import Session, metadata
from quanthistling import model
import quanthistling.dictdata.books
from paste.deploy import appconfig
import functions
from manualannotations_for_jakway2008 import manual_entries
def get_bold_annotations(entry):
sorted_annotations = [ a for a in entry.annotations if a.value=='bold']
sorted_annotations = sorted(sorted_annotations, key=attrgetter('start'))
last_bold_end = -1
at_start = True
last_bold_start = sorted_annotations[0].start
head_starts = []
head_ends = []
for a in sorted_annotations:
if (a.start <= (last_bold_end + 1)):
last_bold_end = a.end
else:
head_starts.append(last_bold_start)
head_ends.append(last_bold_end)
last_bold_start = a.start
last_bold_end = a.end
head_starts.append(last_bold_start)
head_ends.append(last_bold_end)
return head_starts, head_ends
def annotate_head(entry):
# delete head annotations
head_annotations = [ a for a in entry.annotations if a.value=='head' or a.value=="iso-639-3" or a.value=="doculect"]
for a in head_annotations:
Session.delete(a)
head = None
heads = []
sorted_annotations = [ a for a in entry.annotations if a.value=='bold']
sorted_annotations = sorted(sorted_annotations, key=attrgetter('start'))
head_starts, head_ends = get_bold_annotations(entry)
heads = []
for i in range(len(head_starts)):
head_start_pos = head_starts[i]
head_end_pos = head_ends[i]
#head_end_pos = functions.get_last_bold_pos_at_start(entry)
#head_start_pos = 0
if head_end_pos > -1:
start = head_start_pos
substr = entry.fullentry[head_start_pos:head_end_pos]
for match in re.finditer(r', ?', substr):
end = match.start(0) + head_start_pos
inserted_head = functions.insert_head(entry, start, end)
#entry.append_annotation(start, end, u'head', u'dictinterpretation')
heads.append(inserted_head)
start = match.end(0) + head_start_pos
end = head_end_pos
inserted_head = functions.insert_head(entry, start, end)
#entry.append_annotation(start, end, u'head', u'dictinterpretation')
heads.append(inserted_head)
else:
print "no head"
print entry.fullentry.encode('utf-8')
return heads
def annotate_head_without_comma(entry):
# delete head annotations
head_annotations = [ a for a in entry.annotations if a.value=='head' or a.value=="iso-639-3" or a.value=="doculect"]
for a in head_annotations:
Session.delete(a)
head = None
heads = []
sorted_annotations = [ a for a in entry.annotations if a.value=='bold']
sorted_annotations = sorted(sorted_annotations, key=attrgetter('start'))
head_starts, head_ends = get_bold_annotations(entry)
heads = []
for i in range(len(head_starts)):
head_start_pos = head_starts[i]
head_end_pos = head_ends[i]
#head_end_pos = functions.get_last_bold_pos_at_start(entry)
#head_start_pos = 0
if head_end_pos > -1:
inserted_head = functions.insert_head(entry, head_start_pos, head_end_pos)
heads.append(inserted_head)
else:
print "no head"
print entry.fullentry.encode('utf-8')
return heads
def annotate_translations(entry):
# delete translation annotations
trans_annotations = [ a for a in entry.annotations if a.value=='translation']
for a in trans_annotations:
Session.delete(a)
#head_end_pos = functions.get_last_bold_pos_at_start(entry)
head_starts, head_ends = get_bold_annotations(entry)
for i in range(len(head_starts)):
trans_start_pos = head_ends[i]
if len(head_starts) > i+1:
trans_end_pos = head_starts[i+1]
else:
trans_end_pos = len(entry.fullentry)
if trans_start_pos > -1:
substr = entry.fullentry[trans_start_pos:trans_end_pos]
start = trans_start_pos
for match in re.finditer(r'(?:, ?|; ?|\d\) )', substr):
mybreak = False
# are we in a bracket?
for m in re.finditer(r'\(.*?\)', substr):
if match.start(0) >= m.start(0) and match.end(0) <= m.end(0):
mybreak = True
if not mybreak:
end = match.start(0) + trans_start_pos
if end > start and not re.match(r' +$', entry.fullentry[start:end]):
functions.insert_translation(entry, start, end)
start = match.end(0) + trans_start_pos
end = trans_end_pos
if end > start and not re.match(r'^ +$', entry.fullentry[start:end]):
functions.insert_translation(entry, start, end)
def main(argv):
bibtex_key = u"jakway2008"
if len(argv) < 2:
print "call: annotations_for%s.py ini_file" % bibtex_key
exit(1)
ini_file = argv[1]
conf = appconfig('config:' + ini_file, relative_to='.')
if not pylons.test.pylonsapp:
load_environment(conf.global_conf, conf.local_conf)
# Create the tables if they don't already exist
metadata.create_all(bind=Session.bind)
dictdatas = Session.query(model.Dictdata).join(
(model.Book, model.Dictdata.book_id==model.Book.id)
).filter(model.Book.bibtex_key==bibtex_key).all()
for dictdata in dictdatas:
entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id).all()
#entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id,startpage=109,pos_on_page=18).all()
#entries = []
startletters = set()
for e in entries:
if dictdata.startpage == 129:
heads = annotate_head_without_comma(e)
else:
heads = annotate_head(e)
if not e.is_subentry:
for h in heads:
if len(h) > 0:
startletters.add(h[0].lower())
annotate_translations(e)
dictdata.startletters = unicode(repr(sorted(list(startletters))))
Session.commit()
for e in manual_entries:
dictdata = model.meta.Session.query(model.Dictdata).join(
(model.Book, model.Dictdata.book_id==model.Book.id)
).filter(model.Book.bibtex_key==bibtex_key).filter("startpage<=:pagenr and endpage>=:pagenr").params(pagenr=int(e["startpage"])).first()
entry_db = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id, startpage=e["startpage"], pos_on_page=e["pos_on_page"]).first()
if difflib.SequenceMatcher(None, e["fullentry"].decode('utf-8'), entry_db.fullentry).ratio() > 0.95:
entry_db.fullentry = e["fullentry"].decode('utf-8')
# delete all annotations in db
for a in entry_db.annotations:
Session.delete(a)
# insert new annotations
for a in e["annotations"]:
entry_db.append_annotation(a["start"], a["end"], a["value"].decode('utf-8'), a["type"].decode('utf-8'), a["string"].decode('utf-8'))
else:
print "We have a problem, manual entry on page %i pos %i seems not to be the same entry as in db, it was not inserted to db. Please correct the problem." % (e["startpage"], e["pos_on_page"])
Session.commit()
if __name__ == "__main__":
main(sys.argv)
|
gpl-3.0
| -4,918,240,981,853,853,000
| 35.488584
| 202
| 0.577851
| false
| 3.670841
| false
| false
| false
|
heejongahn/hjlog
|
hjlog/models/post.py
|
1
|
1123
|
from datetime import datetime
from hjlog import db
# Tag helper table
tags = db.Table(
'tags',
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')),
db.Column('post_id', db.Integer, db.ForeignKey('post.id')))
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120), index=True, unique=True)
body = db.Column(db.Text)
datetime = db.Column(db.DateTime)
category = db.Column(db.String(20))
author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
private = db.Column(db.Boolean)
tags = db.relationship(
'Tag',
secondary=tags,
backref=db.backref('describes', lazy='dynamic'))
photos = db.relationship('Photo', backref='original')
def __init__(self, title, body, category, author, private, tags):
self.title = title
self.body = body
self.category = category
self.author = author
self.tags = tags
self.private = private
self.datetime = datetime.now()
def is_invisible_by(self, user):
return self.private and self.author != user
|
mit
| -7,618,970,135,233,268,000
| 30.194444
| 69
| 0.631345
| false
| 3.509375
| false
| false
| false
|
kirillmorozov/youbot_control
|
scripts/client_gui.py
|
1
|
32532
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
"""Client GUI to control youBot robot."""
import Tkinter as tk
import ttk
import tkMessageBox
import rospyoubot
from math import radians, degrees
class MainApplication(ttk.Frame):
u"""Основное окно приложения."""
def __init__(self, parent, *args, **kwargs):
u"""Конструктор основного окна."""
ttk.Frame.__init__(self, parent, *args, **kwargs)
self.grid(sticky='nswe')
self.columnconfigure(0, weight=1)
# self.columnconfigure(1, weight=1)
self.style = ttk.Style()
self.style.theme_use('clam')
self.notebook = ttk.Notebook(self)
self.notebook.grid(column=0, row=0, sticky='nswe')
self.manual_controls = ControlsPage(self.notebook)
self.notebook.add(self.manual_controls,
text='Ручное управление',
sticky='nswe')
self.automatic_controls = AutomaticControls(self.notebook)
self.notebook.add(self.automatic_controls,
text='Автоматическое управление',
sticky='nswe')
class ControlsPage(ttk.Frame):
u"""Вкладка управления."""
def __init__(self, parent):
u"""Конструктор класса."""
ttk.Frame.__init__(self, parent)
self.columnconfigure(0, weight=0)
self.columnconfigure(1, weight=1)
# Arm joints controls
self.joints_controls = JointsControlsFrame(self)
self.joints_controls.grid(column=1, row=0, sticky='nswe')
# Odometry
self.odometry = OdometryFrame(self)
self.odometry.grid(column=1, row=1, sticky='nswe')
# Base controls
self.base_control = BaseControl(self)
self.base_control.grid(column=1, row=2, sticky='nswe')
# Padding
for child in self.winfo_children():
child.grid_configure(padx=5, pady=5)
class OdometryFrame(ttk.LabelFrame):
u"""Фрейм одометрии."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.LabelFrame.__init__(self, parent, text='Одометрия:')
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.odom_x = tk.StringVar()
self.odom_x.set('x')
self.odom_y = tk.StringVar()
self.odom_y.set('y')
self.odom_z = tk.StringVar()
self.odom_z.set('z')
ttk.Label(self, text='X:', width=5, anchor=tk.E).grid(column=0, row=0)
ttk.Label(self,
textvariable=ODOMETRY[0],
width=6,
anchor=tk.W).grid(column=1, row=0)
ttk.Label(self, text='Y:', width=5, anchor=tk.E).grid(column=0, row=1)
ttk.Label(self,
textvariable=ODOMETRY[1],
width=6,
anchor=tk.W).grid(column=1, row=1)
ttk.Label(self, text=u'\u03c6:', width=5, anchor=tk.E).grid(column=0,
row=2)
ttk.Label(self,
textvariable=ODOMETRY[2],
width=6,
anchor=tk.W).grid(column=1, row=2)
for child in self.winfo_children():
child.grid_configure(padx=2, pady=2)
class JointsControlsFrame(ttk.LabelFrame):
u"""Фрейм управления степенями подвижности."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.LabelFrame.__init__(self, parent, text='Управление манипулятором:')
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.a1_joint = JointControl(self, 1)
self.a1_joint.grid(row=0, columnspan=2, sticky='nswe')
self.a2_joint = JointControl(self, 2)
self.a2_joint.grid(row=1, columnspan=2, sticky='nswe')
self.a3_joint = JointControl(self, 3)
self.a3_joint.grid(row=2, columnspan=2, sticky='nswe')
self.a4_joint = JointControl(self, 4)
self.a4_joint.grid(row=3, columnspan=2, sticky='nswe')
self.a5_joint = JointControl(self, 5)
self.a5_joint.grid(row=4, columnspan=2, sticky='nswe')
self.gripper = GripperControl(self)
self.gripper.grid(row=5, columnspan=2, sticky='nswe')
self.home_button = ttk.Button(self, text='Домой', width=6)
self.home_button.grid(row=6, column=0, sticky='nswe')
self.home_button.bind('<Button-1>', self.go_home)
self.home_button = ttk.Button(self, text='Свеча', width=6)
self.home_button.grid(row=6, column=1, sticky='nswe')
self.home_button.bind('<Button-1>', self.go_candle)
for child in self.winfo_children():
child.grid_configure(padx=2, pady=2)
def go_home(self, *args):
u"""Отправляет манипулятор в домашнюю позицию."""
R1.arm.set_joints_angles(0.016,
0.04,
-0.072,
0.0432,
2.839)
def go_candle(self, *args):
u"""Приводит манипулятор в положение свечки."""
R1.arm.set_joints_angles(2.9400474018133402,
1.1251030074812907,
-2.5235000069592695,
1.769468876296561,
2.838871440356912)
class JointControl(ttk.Frame):
u"""Фрейм управления отдельной степенью."""
def __init__(self, parent, joint):
u"""Инициализация класса."""
ttk.Frame.__init__(self, parent)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
self.columnconfigure(3, weight=1)
self.joint = joint
self.label = 'A{}:'.format(joint)
self.angle = tk.StringVar()
ttk.Label(self, text=self.label, width=6, anchor='e').grid(column=0,
row=0,
sticky=tk.E)
self.minus_button = ttk.Button(self, text='-', width=7)
self.minus_button.grid(column=1, row=0)
self.minus_button.bind('<Button-1>', self.minus_button_press)
self.minus_button.bind('<ButtonRelease-1>', key_released)
self.state_label = ttk.Label(self,
textvariable=ARM_JOINTS_ANGLES[joint-1],
width=5,
anchor=tk.CENTER)
self.state_label.grid(column=2, row=0, sticky='nswe')
self.plus_button = ttk.Button(self, text='+', width=7)
self.plus_button.grid(column=3, row=0)
self.plus_button.bind('<Button-1>', self.plus_button_press)
self.plus_button.bind('<ButtonRelease-1>', key_released)
    def plus_button_press(self, *args):
        u"""Sets the joint velocity while the '+' button is held."""
        vel = ARM_VELOCITY
        arm_velocities = [vel if x == self.joint - 1 else 0 for x in range(5)]
        R1.arm.set_joints_velocities(*arm_velocities)
    def minus_button_press(self, *args):
        u"""Sets the joint velocity while the '-' button is held."""
        vel = -1 * ARM_VELOCITY
        arm_velocities = [vel if x == self.joint - 1 else 0 for x in range(5)]
        R1.arm.set_joints_velocities(*arm_velocities)
class BaseControl(ttk.LabelFrame):
u"""Фрейм управления движением базы."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.LabelFrame.__init__(self, parent, text='Управление платформой:')
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
controls_style = ttk.Style()
controls_style.configure('base.TButton', font=('TkDefaultFont', 20))
# Rotate left
self.rl_button = ttk.Button(self,
text=u'\u21b6',
width=2,
style='base.TButton')
self.rl_button.grid(column=0, row=0, sticky=tk.SE)
self.rl_button.bind('<Button-1>', self.rl_button_press)
self.rl_button.bind('<ButtonRelease-1>', key_released)
# Forward
self.f_button = ttk.Button(self,
text=u'\u2191',
width=2,
style='base.TButton')
self.f_button.grid(column=1, row=0, sticky=tk.S)
self.f_button.bind('<Button-1>', self.f_button_press)
self.f_button.bind('<ButtonRelease-1>', key_released)
# Rotate right
self.rr_button = ttk.Button(self,
text=u'\u21b7',
width=2,
style='base.TButton')
self.rr_button.grid(column=2, row=0, sticky=tk.SW)
self.rr_button.bind('<Button-1>', self.rr_button_press)
self.rr_button.bind('<ButtonRelease-1>', key_released)
# Left
self.l_button = ttk.Button(self,
text=u'\u2190',
width=2,
style='base.TButton')
self.l_button.grid(column=0, row=1, sticky=tk.NE)
self.l_button.bind('<Button-1>', self.l_button_press)
self.l_button.bind('<ButtonRelease-1>', key_released)
# Backwards
self.b_button = ttk.Button(self,
text=u'\u2193',
width=2,
style='base.TButton')
self.b_button.grid(column=1, row=1, sticky=tk.N)
self.b_button.bind('<Button-1>', self.b_button_press)
self.b_button.bind('<ButtonRelease-1>', key_released)
# Right
self.r_button = ttk.Button(self,
text=u'\u2192',
width=2,
style='base.TButton')
self.r_button.grid(column=2, row=1, sticky=tk.NW)
self.r_button.bind('<Button-1>', self.r_button_press)
self.r_button.bind('<ButtonRelease-1>', key_released)
for child in self.winfo_children():
child.grid_configure(padx=2, pady=2)
    def rl_button_press(self, *args):
        u"""Handles the RL (rotate left) button press."""
        R1.base.set_velocity(ang_z=BASE_VELOCITY)
    def f_button_press(self, *args):
        u"""Handles the F (forward) button press."""
        R1.base.set_velocity(lin_x=BASE_VELOCITY)
    def l_button_press(self, *args):
        u"""Handles the L (left) button press."""
        R1.base.set_velocity(lin_y=BASE_VELOCITY)
    def r_button_press(self, *args):
        u"""Handles the R (right) button press."""
        R1.base.set_velocity(lin_y=-BASE_VELOCITY)
    def b_button_press(self, *args):
        u"""Handles the B (backward) button press."""
        R1.base.set_velocity(lin_x=-BASE_VELOCITY)
    def rr_button_press(self, *args):
        u"""Handles the RR (rotate right) button press."""
        R1.base.set_velocity(ang_z=-BASE_VELOCITY)
class GripperControl(ttk.Frame):
u"""Фрейм управления гриппером."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.Frame.__init__(self, parent)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
self.columnconfigure(3, weight=1)
self.gripper_state = tk.StringVar()
ttk.Label(self,
text='Схват:',
width=6,
anchor='e').grid(column=0,
row=0,
sticky='e')
self.close_button = ttk.Button(self, text='Закрыть', width=7)
self.close_button.grid(column=1, row=0)
self.close_button.bind('<Button-1>', self.close_gripper)
ttk.Label(self,
textvariable=self.gripper_state,
anchor=tk.CENTER,
width=5).grid(column=2, row=0, sticky=(tk.W, tk.E))
self.open_button = ttk.Button(self, text='Открыть', width=7)
self.open_button.grid(column=3, row=0)
self.open_button.bind('<Button-1>', self.open_gripper)
    def close_gripper(self, *args):
        u"""Closes the gripper and updates its status label."""
        self.gripper_state.set('Закрыт')
        R1.arm.gripper.set_gripper_state(False)
    def open_gripper(self, *args):
        u"""Opens the gripper and updates its status label."""
        self.gripper_state.set('Открыт')
        R1.arm.gripper.set_gripper_state(True)
class AutomaticControls(ttk.Frame):
u"""Фрейм автоматического управления."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.Frame.__init__(self, parent)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.pt_list = tk.StringVar()
# Points Listbox
self.points_list = tk.Listbox(self,
height=29,
selectmode='browse',
listvariable=self.pt_list)
self.points_list.grid(column=0,
row=0,
sticky='nswe',
rowspan=2,
columnspan=2)
# Buttons frame
self.buttons_frame = ttk.Frame(self)
self.buttons_frame.grid(column=2, row=0, sticky='n')
# Add base button
self.add_base_button = ttk.Button(self.buttons_frame,
text=u'Платформа',
width=9)
self.add_base_button.grid(column=0, row=0, columnspan=2)
self.add_base_button.bind('<Button-1>', self.add_to_list)
# Add arm button
self.add_arm_button = ttk.Button(self.buttons_frame,
text=u'Манипулятор',
width=9)
self.add_arm_button.grid(column=0, row=1, columnspan=2)
self.add_arm_button.bind('<Button-1>', self.add_arm_point)
# Edit button
# ttk.Button(self.buttons_frame,
# text=u'Редактировать',
# width=9).grid(column=0, row=1)
        # Gripper open / close buttons
self.grip_open_button = ttk.Button(self.buttons_frame,
text=u'Откр',
width=3)
self.grip_open_button.grid(column=0, row=2)
self.grip_open_button.bind('<Button-1>', self.open_gripper)
self.grip_close_button = ttk.Button(self.buttons_frame,
text=u'Закр',
width=3)
self.grip_close_button.grid(column=1, row=2)
self.grip_close_button.bind('<Button-1>', self.close_gripper)
        # Remove button
        self.remove_button = ttk.Button(self.buttons_frame,
text=u'Удалить',
width=9)
self.remove_button.grid(column=0, row=3, columnspan=2)
self.remove_button.bind('<Button-1>', self.remove_point)
# Start button
ttk.Button(self.buttons_frame,
text=u'Старт',
width=9,
command=self.start).grid(column=0, row=4, columnspan=2)
# Stop button
ttk.Button(self.buttons_frame,
text=u'Стоп',
width=9).grid(column=0, row=5, columnspan=2)
# Up button
ttk.Button(self, text=u'Вверх', command=self.moveup).grid(column=0, row=2)
# Down button
ttk.Button(self, text=u'Вниз', command=self.movedown).grid(column=1, row=2)
for child in self.winfo_children():
child.grid_configure(padx=5, pady=5)
for child in self.buttons_frame.winfo_children():
child.grid_configure(padx=5, pady=5)
def add_to_list(self, event):
u"""Добавляет движение в список движений."""
BaseMotionAddition(self)
def add_arm_point(self, event):
u"""Span window to add Arm point."""
ArmMotionAddition(self)
def remove_point(self, event):
u"""Удаляет выбранное движение из списка."""
if len(self.points_list.curselection()) > 0:
index = int(self.points_list.curselection()[0])
points = listbox_to_list(self.pt_list.get())
POINTS_DICT.pop(points.pop(index))
listbox_string = ' '.join(points)
self.pt_list.set(listbox_string)
def start(self):
u"""Запускает выполнение программы движения робота."""
for name in listbox_to_list(self.pt_list.get()):
if name.startswith('Base'):
R1.base.set_velocity(0, 0, 0)
R1.base.lin(*POINTS_DICT[name])
R1.base.set_velocity(0, 0, 0)
elif name.startswith('Arm'):
R1.arm.ptp(POINTS_DICT[name])
    def moveup(self):
        u"""Moves the selected point one position up in the list."""
        if not self.points_list.curselection():
            return
        index = int(self.points_list.curselection()[0])
        points = listbox_to_list(self.pt_list.get())
        if index >= 1:
            item = points.pop(index)
            points.insert(index-1, item)
            listbox_string = ' '.join(points)
            self.pt_list.set(listbox_string)
    def movedown(self):
        u"""Moves the selected point one position down in the list."""
        if not self.points_list.curselection():
            return
        index = int(self.points_list.curselection()[0])
        points = listbox_to_list(self.pt_list.get())
        if index < len(points)-1:
            item = points.pop(index)
            points.insert(index+1, item)
            listbox_string = ' '.join(points)
            self.pt_list.set(listbox_string)
def close_gripper(self, *args):
pass
def open_gripper(self, *args):
pass
class BaseMotionAddition(tk.Toplevel):
u"""Окно добавления движения."""
def __init__(self, parent):
u"""Инициализация класса."""
tk.Toplevel.__init__(self, parent)
self.parent = parent
self.title(u'Движение платформы')
self.resizable(0, 0)
self.frm = ttk.Frame(self)
self.frm.grid(column=0, row=0, sticky='nswe')
ttk.Label(self.frm,
text=u'Имя точки:').grid(column=0, row=0, sticky='e')
# Point's name
self.point_name = tk.StringVar()
ttk.Entry(self.frm,
textvariable=self.point_name).grid(column=1,
row=0,
sticky='w')
# X coordinate
ttk.Label(self.frm,
text=u'X:',
width=3).grid(column=3, row=0, sticky='e')
self.X = ttk.Entry(self.frm)
self.X.grid(column=4, row=0, sticky='w')
# Y coordinate
ttk.Label(self.frm,
text=u'Y:',
width=3).grid(column=3, row=1, sticky='e')
self.Y = ttk.Entry(self.frm)
self.Y.grid(column=4, row=1, sticky='w')
# Orientation
ttk.Label(self.frm,
text=u'\u03c6:',
width=3).grid(column=3, row=2, sticky='e')
self.Phi = ttk.Entry(self.frm)
self.Phi.grid(column=4, row=2, sticky='w')
# Touch Up! button
ttk.Button(self.frm,
text='Touch Up',
command=self.touch_up).grid(column=4, row=3)
# Save button
save_button = ttk.Button(self.frm, text=u'Сохранить', command=self.save)
save_button.grid(row=3, column=0)
# Cancel button
cancel_button = ttk.Button(self.frm,
text=u'Отмена',
command=self.cancel)
cancel_button.grid(row=3, column=1)
for child in self.frm.winfo_children():
child.grid_configure(padx=5, pady=5)
def cancel(self):
u"""Закрывает окно, не сохраняя результат."""
self.destroy()
def save(self):
u"""Сохраняет точку в список точек."""
points_list = listbox_to_list(self.parent.pt_list.get())
name = 'Base:{}'.format(self.point_name.get())
x = self.X.get()
y = self.Y.get()
phi = self.Phi.get()
if self.input_is_valid(name, x, y, phi):
POINTS_DICT[name] = (float(x), float(y), radians(float(phi)))
points_list.append(name)
listbox_string = ' '.join(points_list)
self.parent.pt_list.set(listbox_string)
self.destroy()
else:
tkMessageBox.showerror(u"Ошибка добавления точки.",
u"Проверьте поля ввода.")
def touch_up(self):
u"""Записывает текущие координаты базы в поля ввода координат."""
odometry = R1.base.get_odometry()
self.X.insert(0, odometry[0])
self.Y.insert(0, odometry[1])
self.Phi.insert(0, degrees(odometry[2]))
def input_is_valid(self, name, x, y, phi):
u"""Check input data for validity."""
name_ok = name not in POINTS_DICT.keys()
x_ok = isfloat(self.X.get())
y_ok = isfloat(self.Y.get())
phi_ok = isfloat(self.Phi.get())
if name_ok and x_ok and y_ok and phi_ok:
return True
else:
return False
class ArmMotionAddition(tk.Toplevel):
u"""Window that add arm motion to points lists."""
def __init__(self, parent):
u"""Class constructor."""
tk.Toplevel.__init__(self, parent)
self.parent = parent
self.title(u'Движение манипулятора')
self.resizable(0, 0)
frame = ttk.Frame(self)
frame.grid(row=0, column=0, sticky='nswe')
# Coordinates
coordinates = ttk.LabelFrame(frame,
text=u"Введите координаты и углы ориентации")
coordinates.grid(row=0, column=0, columnspan=3, sticky='nswe')
# X
self.X = tk.StringVar()
ttk.Label(coordinates, text=u"X:").grid(row=0, column=0)
x_input = ttk.Entry(coordinates, textvariable=self.X)
x_input.grid(row=0, column=1)
# Y
self.Y = tk.StringVar()
ttk.Label(coordinates, text=u"Y:").grid(row=1, column=0)
y_input = ttk.Entry(coordinates, textvariable=self.Y)
y_input.grid(row=1, column=1)
# Z
self.Z = tk.StringVar()
ttk.Label(coordinates, text=u"Z:").grid(row=2, column=0)
z_input = ttk.Entry(coordinates, textvariable=self.Z)
z_input.grid(row=2, column=1)
# W
self.W = tk.StringVar()
ttk.Label(coordinates, text=u"W:").grid(row=0, column=2)
w_input = ttk.Entry(coordinates, textvariable=self.W)
w_input.grid(row=0, column=3)
# O
self.O = tk.StringVar()
ttk.Label(coordinates, text=u"O:").grid(row=1, column=2)
o_input = ttk.Entry(coordinates, textvariable=self.O)
o_input.grid(row=1, column=3)
# Name
self.point_name = tk.StringVar()
ttk.Label(coordinates, text=u"Имя:").grid(row=2, column=2)
name_input = ttk.Entry(coordinates, textvariable=self.point_name)
name_input.grid(row=2, column=3)
# Configuration
configuration = ttk.LabelFrame(frame, text=u"Выберите конфигурацию")
configuration.grid(row=1, column=0, columnspan=3, sticky='nswe')
self.elbow = tk.IntVar()
self.oriset = tk.IntVar()
ttk.Radiobutton(configuration,
text=u"Локоть вверх",
variable=self.elbow,
value=0).grid(row=0, column=0)
ttk.Radiobutton(configuration,
text=u"Локоть вниз",
variable=self.elbow,
value=1).grid(row=1, column=0)
ttk.Radiobutton(configuration,
text=u"Прямое плечо",
variable=self.oriset,
value=0).grid(row=0, column=1)
ttk.Radiobutton(configuration,
text=u"Обратное плечо",
variable=self.oriset,
value=1).grid(row=1, column=1)
ttk.Button(configuration,
text="Текущие координаты",
command=self.touch_up).grid(row=0,
column=2)
ttk.Button(frame,
text="Move Arm",
command=self.move_arm).grid(row=2,
column=1)
# Save
ttk.Button(frame,
text=u"Сохранить",
command=self.save).grid(row=2,
column=2)
# Cancel
ttk.Button(frame,
text=u"Отмена",
command=self.cancel).grid(row=2,
column=0)
for child in self.winfo_children():
child.grid_configure(padx=5, pady=5)
for child in frame.winfo_children():
child.grid_configure(padx=5, pady=5)
for child in coordinates.winfo_children():
child.grid_configure(padx=5, pady=5)
for child in configuration.winfo_children():
child.grid_configure(padx=5, pady=5)
def save(self):
u"""Save arm position to points list."""
if self.input_is_valid():
points_list = listbox_to_list(self.parent.pt_list.get())
name = 'Arm:{}'.format(self.point_name.get())
x = self.X.get()
y = self.Y.get()
z = self.Z.get()
w = self.W.get()
ori = self.oriset.get()
elbow = self.elbow.get()
try:
point = rospyoubot._joints_angles_for_pose(x, y, z, w, ori,
elbow)
POINTS_DICT[name] = point
points_list.append(name)
listbox_string = ' '.join(points_list)
self.parent.pt_list.set(listbox_string)
self.destroy()
except ValueError:
tkMessageBox.showerror(u"Ошибка добавления точки.",
u"Точка недостижима")
else:
tkMessageBox.showerror(u"Ошибка добавления точки.",
u"Проверьте поля ввода.")
def move_arm(self):
u"""Move arm to entered coordinates."""
pass
def touch_up(self):
u"""Save current joints angles as point."""
joints = R1.arm.get_current_joints_positions()
x, y, z, w, o = rospyoubot._joints_positions_to_cartesian(self.oriset.get(),
*joints)
self.X.set(x)
self.Y.set(y)
self.Z.set(z)
self.W.set(w)
self.O.set(o)
def cancel(self):
u"""Закрывает окно, не сохраняя результат."""
self.destroy()
def input_is_valid(self):
u"""Check if all inputs are valid."""
x_ok = isfloat(self.X.get())
y_ok = isfloat(self.Y.get())
z_ok = isfloat(self.Z.get())
w_ok = isfloat(self.W.get())
o_ok = isfloat(self.O.get())
name = 'Arm:' + self.point_name.get()
name_not_empty = self.point_name.get() != ''
name_ok = name not in POINTS_DICT.keys()
everything_ok = (x_ok and y_ok and z_ok and w_ok and o_ok and name_ok
and name_not_empty)
if everything_ok:
return True
else:
return False
def key_pressed(event):
u"""Обрабатывает нажатие на кнопку клавиатуры."""
# Base movement
if event.char == 'i':
R1.base.set_velocity(lin_x=BASE_VELOCITY)
elif event.char == 'k':
R1.base.set_velocity(lin_x=-BASE_VELOCITY)
elif event.char == 'j':
R1.base.set_velocity(lin_y=BASE_VELOCITY)
elif event.char == 'l':
R1.base.set_velocity(lin_y=-BASE_VELOCITY)
elif event.char == 'u':
R1.base.set_velocity(ang_z=BASE_VELOCITY)
elif event.char == 'o':
R1.base.set_velocity(ang_z=-BASE_VELOCITY)
# Arm movement
if event.char == 'q':
R1.arm.set_joints_velocities(1, 0, 0, 0, 0)
elif event.char == 'Q':
R1.arm.set_joints_velocities(-1, 0, 0, 0, 0)
if event.char == 'w':
R1.arm.set_joints_velocities(0, 1, 0, 0, 0)
elif event.char == 'W':
R1.arm.set_joints_velocities(0, -1, 0, 0, 0)
if event.char == 'e':
R1.arm.set_joints_velocities(0, 0, 1, 0, 0)
elif event.char == 'E':
R1.arm.set_joints_velocities(0, 0, -1, 0, 0)
if event.char == 'r':
R1.arm.set_joints_velocities(0, 0, 0, 1, 0)
elif event.char == 'R':
R1.arm.set_joints_velocities(0, 0, 0, -1, 0)
if event.char == 't':
R1.arm.set_joints_velocities(0, 0, 0, 0, 1)
elif event.char == 'T':
R1.arm.set_joints_velocities(0, 0, 0, 0, -1)
if event.char == 'G':
R1.arm.gripper.set_gripper_state(True)
if event.char == 'g':
R1.arm.gripper.set_gripper_state(False)
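# Keyboard bindings handled by key_pressed:
#   i / k - drive the base forward / backward
#   j / l - strafe the base left / right
#   u / o - rotate the base left / right
#   q, w, e, r, t - positive velocity on joints A1-A5 (uppercase = negative)
#   G / g - open / close the gripper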
def key_released(event):
u"""Обрабатывает отпускание кнопки клавиатуры."""
R1.base.set_velocity()
R1.arm.set_joints_velocities(0, 0, 0, 0, 0)
def update_joints_labels():
u"""бновляет данные о текущем угле поворота осей и одометрии базы."""
current_joints_positions = list(R1.arm.get_current_joints_positions())
odom = R1.base.get_odometry()
for index, value in enumerate(odom):
ODOMETRY[index].set(round(value, 3))
for index, value in enumerate(current_joints_positions):
ARM_JOINTS_ANGLES[index].set(round(degrees(value), 3))
ROOT.after(100, update_joints_labels)
def listbox_to_list(listbox_str):
u"""Convert listbox string into list."""
string = listbox_str[1:-1]
list_from_string = string.split()
striped_list = [item.strip(",'") for item in list_from_string]
return striped_list
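# Illustrative only, assuming the linked listvariable renders as a Tcl-style
# string such as "('Base:p1', 'Arm:p2')"; listbox_to_list() drops the outer
# characters, splits on whitespace and trims quotes/commas:
#     listbox_to_list("('Base:p1', 'Arm:p2')")  ->  ['Base:p1', 'Arm:p2']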
def isfloat(string):
u"""Return True if string can be converted into float."""
try:
float(string)
return True
except ValueError:
return False
if __name__ == '__main__':
ROOT = tk.Tk()
ROOT.title("youBot control")
ROOT.resizable(1, 0)
ROOT.columnconfigure(0, weight=1)
BASE_VELOCITY = 0.2
ARM_VELOCITY = 1
R1 = rospyoubot.YouBot()
ARM_JOINTS_ANGLES = [tk.StringVar() for _ in range(5)]
ODOMETRY = [tk.StringVar() for _ in range(3)]
POINTS_DICT = {}
MAINFRAME = MainApplication(ROOT)
ROOT.update()
ROOT.minsize(ROOT.winfo_width(), ROOT.winfo_height())
ROOT.bind('<Key>', key_pressed)
ROOT.bind('<KeyRelease>', key_released)
ROOT.after(100, update_joints_labels)
ROOT.mainloop()
|
bsd-2-clause
| 1,213,052,310,638,447,400
| 38.376276
| 84
| 0.535551
| false
| 3.097632
| true
| false
| false
|
adrienpacifico/openfisca-france
|
openfisca_france/model/prelevements_obligatoires/prelevements_sociaux/cotisations_sociales/exonerations.py
|
1
|
24644
|
# -*- coding: utf-8 -*-
from __future__ import division
from numpy import datetime64, maximum as max_, minimum as min_, round as round_, timedelta64
from ....base import * # noqa analysis:ignore
from .base import apply_bareme_for_relevant_type_sal
class jei_date_demande(Variable):
column = DateCol(default = date(2099, 12, 31))
entity_class = Individus
label = u"Date de demande (et d'octroi) du statut de jeune entreprise innovante (JEI)"
class exoneration_cotisations_employeur_geographiques(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonérations de cotisations employeur dépendant d'une zone géographique"
url = "https://www.apce.com/pid815/aides-au-recrutement.html?espace=1&tp=1"
def function(self, simulation, period):
exoneration_cotisations_employeur_zfu = simulation.calculate_add('exoneration_cotisations_employeur_zfu',
period)
exoneration_cotisations_employeur_zrd = simulation.calculate_add('exoneration_cotisations_employeur_zrd',
period)
exoneration_cotisations_employeur_zrr = simulation.calculate_add('exoneration_cotisations_employeur_zrr',
period)
exonerations_geographiques = (exoneration_cotisations_employeur_zfu + exoneration_cotisations_employeur_zrd +
exoneration_cotisations_employeur_zrr)
return period, exonerations_geographiques
class exoneration_cotisations_employeur_jei(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations de cotisations employeur pour une jeune entreprise innovante"
url = "http://www.apce.com/pid1653/jeune-entreprise-innovante.html?pid=1653&pagination=2"
def function(self, simulation, period):
period = period.this_month
assiette_allegement = simulation.calculate('assiette_allegement', period)
jei_date_demande = simulation.calculate('jei_date_demande', period)
jeune_entreprise_innovante = simulation.calculate('jeune_entreprise_innovante', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
smic_proratise = simulation.calculate('smic_proratise', period)
type_sal = simulation.calculate('type_sal', period)
bareme_by_type_sal_name = simulation.legislation_at(period.start).cotsoc.cotisations_employeur
bareme_names = ['vieillesse_deplafonnee', 'vieillesse_plafonnee', 'maladie', 'famille']
exoneration = smic_proratise * 0.0
for bareme_name in bareme_names:
exoneration += apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = bareme_by_type_sal_name,
bareme_name = bareme_name,
type_sal = type_sal,
base = min_(assiette_allegement, 4.5 * smic_proratise),
plafond_securite_sociale = plafond_securite_sociale,
round_base_decimals = 2,
)
exoneration_relative_year_passed = exoneration_relative_year(period, jei_date_demande)
rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: 1,
6: 1,
7: 1,
} # TODO: move to legislation parameters file
for year_passed, rate in rate_by_year_passed.iteritems():
if (exoneration_relative_year_passed == year_passed).any():
exoneration[exoneration_relative_year_passed == year_passed] = rate * exoneration
return period, - exoneration * jeune_entreprise_innovante
class exoneration_cotisations_employeur_zfu(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations de cotisations employeur pour l'embauche en zone franche urbaine (ZFU)"
url = "http://www.apce.com/pid553/exoneration-dans-les-zfu.html?espace=1&tp=1&pagination=2"
# TODO
    # This social-contribution exemption scheme has been closed since 1 January 2015 but remains applicable to
    # companies that already benefited from it before that date.
    # - not be owned for more than 25% by companies employing more than 250 employees and whose turnover or
    #   balance sheet exceeds €50M or €43M,
    # - have the operating assets or stocks needed for the employees' activity,
    # - be up to date with its social contributions or have signed up to a progressive debt-clearance plan.
    #
    # Business sectors concerned
    #
    # The exemption applies whatever the business sector.
    # However, companies whose main activity is in automotive construction, shipbuilding, manufacture of
    # artificial or synthetic textile fibres, the steel industry or road freight transport cannot benefit from
    # this exemption.
    # Hiring of residents (local hiring clause)
    # For companies created or established in a ZFU from 1 January 2012 onwards, the social-contribution
    # exemption is conditional, for every new hire, on half of the hired or employed workers residing in the ZFU
    # or in a sensitive urban zone (ZUS).
    #
    # Compliance with the local hiring condition is assessed at the effective date of the new hire, starting
    # from the second hire.
    #
    # Note: "employed" workers are those already present in the company at the date of the new hire; "hired"
    # workers are those recruited since the date the company was created or established in the ZFU.
    #
    # A worker counts as a resident if they live either in the ZFU where the company is located or in one of the
    # ZUS of the urban unit containing that ZFU. At the employer's request, the mayor may provide information on
    # residency in the zone so that the required proportion can be checked.
    #
    # If the proportion is not met at the effective date of the hire, the employer has 3 months to put the
    # situation right. Failing that, the exemption is suspended from the first day of the month following the
    # end of the 3-month period until the first day of the month following the date the condition is met again.
    #
    # The resident worker must hold an open-ended contract or a fixed-term contract of at least 12 months,
    # concluded for at least 16 hours per week.
    # 5 years and more
    # In companies with 5 or more employees, employer contributions get an abatement of the taxable base over
    # 3 years:
    # - 60% the first year,
    # - 40% the second year,
    # - 20% the third year.
    #
    # In companies with fewer than 5 employees, an abatement is applied over 9 years as follows:
    # - 60% for the first 5 years,
    # - 40% for the following 2 years,
    # - 20% for the last two years.
    #
    # The total of all de minimis public aid (tax and social relief plus aid from local authorities) may not
    # exceed the de minimis ceiling, set at €200,000 over a rolling 36-month period (€100,000 for road transport
    # companies).
def function(self, simulation, period):
period = period.this_month
assiette_allegement = simulation.calculate('assiette_allegement', period)
contrat_de_travail_duree = simulation.calculate('contrat_de_travail_duree', period) # 0: CDI, 1:CDD
contrat_de_travail_debut = simulation.calculate('contrat_de_travail_debut', period)
contrat_de_travail_fin = simulation.calculate('contrat_de_travail_fin', period)
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
entreprise_chiffre_affaire = simulation.calculate('entreprise_chiffre_affaire', period)
entreprise_bilan = simulation.calculate('entreprise_bilan', period)
smic_proratise = simulation.calculate('smic_proratise', period)
taux_versement_transport = simulation.calculate('taux_versement_transport', period)
# TODO: move to legislation parameters file
entreprise_eligible = (entreprise_chiffre_affaire <= 1e7) | (entreprise_bilan <= 1e7)
zone_franche_urbaine = simulation.calculate('zone_franche_urbaine', period)
duree_cdd_eligible = (contrat_de_travail_fin > contrat_de_travail_debut + timedelta64(365, 'D'))
# TODO: move to legislation parameters file
contrat_de_travail_eligible = (contrat_de_travail_debut <= datetime64("2014-12-31")) * (
(contrat_de_travail_duree == 0) + (
(contrat_de_travail_duree == 1) * (duree_cdd_eligible)
)
)
# TODO: move to legislation parameters file
eligible = (
contrat_de_travail_eligible *
(effectif_entreprise <= 50) *
zone_franche_urbaine *
entreprise_eligible
)
bareme_by_name = simulation.legislation_at(period.start).cotsoc.cotisations_employeur['prive_non_cadre']
taux_max = (
bareme_by_name['vieillesse_deplafonnee'].rates[0] +
bareme_by_name['vieillesse_plafonnee'].rates[0] +
bareme_by_name['maladie'].rates[0] +
bareme_by_name['famille'].rates[0] +
bareme_by_name['fnal1'].rates[0] +
bareme_by_name['fnal2'].rates[0] * (effectif_entreprise >= 20) +
taux_versement_transport
)
        # TODO: move to legislation parameters file, see http://www.urssaf.fr/images/ref_lc2009-077.pdf
seuil_max = 2
seuil_min = 1.4
taux_exoneration = compute_taux_exoneration(assiette_allegement, smic_proratise, taux_max, seuil_max, seuil_min)
exoneration_relative_year_passed = exoneration_relative_year(period, contrat_de_travail_debut)
large_rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: .60,
6: .40,
7: .20,
} # TODO: move to legislation parameters file
small_rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: .60,
6: .60,
7: .60,
8: .60,
9: .60,
10: .40,
11: .40,
12: .20,
13: .20,
} # TODO: move to legislation parameters file
large_taux_exoneration = eligible * 0.0
small_taux_exoneration = eligible * 0.0
for year_passed, rate in large_rate_by_year_passed.iteritems():
if (exoneration_relative_year_passed == year_passed).any():
large_taux_exoneration[exoneration_relative_year_passed == year_passed] = rate * taux_exoneration
for year_passed, rate in small_rate_by_year_passed.iteritems():
if (exoneration_relative_year_passed == year_passed).any():
small_taux_exoneration[exoneration_relative_year_passed == year_passed] = rate * taux_exoneration
exoneration_cotisations_zfu = eligible * assiette_allegement * (
small_taux_exoneration * (effectif_entreprise <= 5) +
large_taux_exoneration * (effectif_entreprise > 5)
)
return period, exoneration_cotisations_zfu
# TODO: propagate over time
class exoneration_cotisations_employeur_zrd(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations de cotisations employeur pour l'embauche en zone de restructuration de la Défense (ZRD)"
url = "http://www.apce.com/pid11668/exoneration-dans-les-zrd.html?espace=1&tp=1"
def function(self, simulation, period):
period = period.this_month
assiette_allegement = simulation.calculate('assiette_allegement', period)
entreprise_creation = simulation.calculate('entreprise_creation', period)
smic_proratise = simulation.calculate('smic_proratise', period)
zone_restructuration_defense = simulation.calculate('zone_restructuration_defense', period)
eligible = zone_restructuration_defense
taux_max = .281 # TODO: move to legislation parameters file
seuil_max = 2.4
seuil_min = 1.4
taux_exoneration = compute_taux_exoneration(assiette_allegement, smic_proratise, taux_max, seuil_max, seuil_min)
exoneration_relative_year_passed = exoneration_relative_year(period, entreprise_creation)
rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 2 / 3,
4: 1 / 3,
} # TODO: move to legislation parameters file
ratio = eligible * 0.0
for year_passed, rate in rate_by_year_passed.iteritems():
if (exoneration_relative_year_passed == year_passed).any():
ratio[exoneration_relative_year_passed == year_passed] = rate
exoneration_cotisations_zrd = ratio * taux_exoneration * assiette_allegement * eligible
return period, exoneration_cotisations_zrd
class exoneration_cotisations_employeur_zrr(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations de cotisations employeur pour l'embauche en zone de revitalisation rurale (ZRR)"
url = "http://www.apce.com/pid538/embauches-en-zru-et-zrr.html?espace=1&tp=1"
    # Companies and employer groupings carrying out an industrial, commercial, craft, agricultural or liberal
    # activity and contributing to the unemployment insurance scheme.
    # The companies concerned, including each company belonging to an employer grouping, must have at least one
    # establishment located in a rural revitalisation zone (ZRR).
    #
    # Note: non-profit associations are excluded from the scheme. However, whatever their legal form,
    # work-integration companies and work-integration temping agencies are eligible. Neighbourhood management
    # bodies (régies de quartier) are eligible when their activity is liable to make them subject to VAT,
    # corporate income tax and the territorial economic contribution, whether or not they actually pay them.
    #
    # The employer must not have carried out any economic redundancy during the 12 months preceding the hire.
def function(self, simulation, period):
period = period.this_month
assiette_allegement = simulation.calculate('assiette_allegement', period)
contrat_de_travail_duree = simulation.calculate('contrat_de_travail_duree', period) # 0: CDI, 1:CDD
contrat_de_travail_debut = simulation.calculate('contrat_de_travail_debut', period)
contrat_de_travail_fin = simulation.calculate('contrat_de_travail_fin', period)
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
smic_proratise = simulation.calculate('smic_proratise', period)
zone_revitalisation_rurale = simulation.calculate('zone_revitalisation_rurale', period)
duree_cdd_eligible = contrat_de_travail_fin > contrat_de_travail_debut + timedelta64(365, 'D')
# TODO: move to legislation parameters file
contrat_de_travail_eligible = (
contrat_de_travail_duree == 0) + (
(contrat_de_travail_duree == 1) * (duree_cdd_eligible)
)
duree_validite = (
datetime64(period.start) + timedelta64(1, 'D') - contrat_de_travail_debut).astype('timedelta64[Y]') < 1
eligible = (
contrat_de_travail_eligible *
(effectif_entreprise <= 50) *
zone_revitalisation_rurale *
duree_validite
)
taux_max = .281 if period.start.year < 2015 else .2655 # TODO: move to legislation parameters file
seuil_max = 2.4
seuil_min = 1.5
taux_exoneration = compute_taux_exoneration(assiette_allegement, smic_proratise, taux_max, seuil_max, seuil_min)
exoneration_cotisations_zrr = taux_exoneration * assiette_allegement * eligible
return period, exoneration_cotisations_zrr
# Business creation aid
class exoneration_is_creation_zrr(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations fiscales pour création d'une entreprise en zone de revitalisation rurale (ZRR)"
url = 'http://www.apce.com/pid11690/exonerations-d-impots-zrr.html?espace=1&tp=1'
def function(self, simulation, period):
period = period.this_year
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
entreprise_benefice = simulation.calculate('entreprise_benefice', period)
        # TODO: MODIFY using the company creation date
contrat_de_travail_duree = simulation.calculate('contrat_de_travail_duree', period) # 0: CDI, 1:CDD
contrat_de_travail_debut = simulation.calculate('contrat_de_travail_debut', period)
contrat_de_travail_fin = simulation.calculate('contrat_de_travail_fin', period)
duree_eligible = contrat_de_travail_fin > contrat_de_travail_debut + timedelta64(365, 'D')
# TODO: move to legislation parameters file
contrat_de_travail_eligible = (
contrat_de_travail_duree == 0) + (
(contrat_de_travail_duree == 1) * (duree_eligible)
)
zone_revitalisation_rurale = simulation.calculate('zone_revitalisation_rurale', period)
eligible = (
contrat_de_travail_eligible *
(effectif_entreprise <= 50) *
zone_revitalisation_rurale
)
exoneration_relative_year_passed = exoneration_relative_year(period, contrat_de_travail_debut)
rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: .75,
6: .50,
7: .25,
} # TODO: move to legislation parameters file
        taux_exoneration = eligible * 0.0
        for year_passed, rate in rate_by_year_passed.iteritems():
            taux_exoneration[exoneration_relative_year_passed == year_passed] = rate
        return period, taux_exoneration * entreprise_benefice
# TODO: apply to all years
# # class bassin_emploi_redynamiser(Variable):
#     column = BoolCol
#     entity_class = Individus
#     label = u"L'entreprise est située dans un bassin d'emploi à redynamiser (BER)"
#     # The list of employment areas to be revitalised (BER) was set by decree n°2007-228 of 20 February 2007.
#     # Currently two regions are concerned: Champagne-Ardenne (Vallée de la Meuse employment area)
#     # and Midi-Pyrénées (Lavelanet employment area).
#
# def function(self, simulation, period):
# effectif_entreprise = simulation.calculate('effectif_entreprise', period)
# return period, (effectif_entreprise >= 1) * False
class jeune_entreprise_innovante(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est une jeune entreprise innovante"
def function(self, simulation, period):
        # Any company existing on 1 January 2004 or created between 1 January 2004 and 31 December 2016,
        # provided it meets the following conditions:
        #
        # be less than 8 years old at the time of the request
        #
        # be genuinely new, i.e. not created as part of a merger, a restructuring, an extension of a
        # pre-existing activity or a takeover
        #
        # employ fewer than 250 people during the financial year for which the status is requested
        #
        # have a turnover below €50M and a balance-sheet total below €43M
        #
        # be independent, i.e. at least 50% of its capital must be held by:
        #
        # - natural persons
        #
        # - one or more other JEIs at least 50% of whose capital is held by natural persons
        #
        # - associations or foundations recognised as being of public utility with a scientific purpose
        #
        # - research and teaching institutions and their subsidiaries
        #
        # - investment structures, provided there is no dependency link, such as:
        #   - innovation mutual funds (FCPI)
        #   - venture-capital companies
        #   - local investment funds (FIP)
        #   - regional development companies (SDR)
        #   - innovation finance companies (SFI)
        #   - single-shareholder venture investment companies (SUIR).
        #
        # incur R&D expenses representing at least 15% of the tax-deductible expenses for the same
        # financial year.
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
entreprise_bilan = simulation.calculate('entreprise_bilan', period)
entreprise_chiffre_affaire = simulation.calculate('entreprise_chiffre_affaire', period)
entreprise_creation = simulation.calculate('entreprise_creation', period)
# entreprise_depenses_rd = simulation.calculate('entreprise_depenses_rd', period)
jei_date_demande = simulation.calculate('jei_date_demande', period)
# TODO: move to legislation parameters file
# entreprise_depenses_rd > .15 TODO
independance = True
jeune_entreprise_innovante = (
independance *
(effectif_entreprise < 250) *
(entreprise_creation <= datetime64("2016-12-31")) *
((jei_date_demande + timedelta64(1, 'D') - entreprise_creation).astype('timedelta64[Y]') < 8) *
(entreprise_chiffre_affaire < 50e6) *
(entreprise_bilan < 43e6)
)
return period, jeune_entreprise_innovante
class bassin_emploi_redynamiser(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est située danns un bassin d'emploi à redynamiser (BER)"
# La liste des bassins d'emploi à redynamiser a été fixée par le décret n°2007-228 du 20 février 2007.
# Actuellement, deux régions sont concernées : Champagne-Ardenne (zone d'emploi de la Vallée de la Meuse)
# et Midi-Pyrénées (zone d'emploi de Lavelanet).
def function(self, simulation, period):
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
return period, (effectif_entreprise >= 1) * False
class zone_restructuration_defense(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est située dans une zone de restructuration de la Défense (ZRD)"
def function(self, simulation, period):
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
return period, (effectif_entreprise >= 1) * False
class zone_franche_urbaine(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est située danns une zone franche urbaine (ZFU)"
def function(self, simulation, period):
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
return period, (effectif_entreprise >= 1) * False
class zone_revitalisation_rurale(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est située dans une zone de revitalisation rurale (ZRR)"
def function(self, simulation, period):
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
return period, (effectif_entreprise >= 1) * False
# Helpers
def compute_taux_exoneration(assiette_allegement, smic_proratise, taux_max, seuil_max, seuil_min = 1):
ratio_smic_salaire = smic_proratise / (assiette_allegement + 1e-16)
    # rounding rule: 4 decimals, to the nearest ten-thousandth (# TODO: carried over from the Fillon relief, unchecked)
return round_(
taux_max * min_(1, max_(seuil_max * seuil_min * ratio_smic_salaire - seuil_min, 0) / (seuil_max - seuil_min)),
4,
)
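# Behaviour sketch of compute_taux_exoneration (hypothetical figures, for
# illustration only): the returned rate equals taux_max for salaries up to
# seuil_min * SMIC and falls to 0 at seuil_max * SMIC. For instance, with
# taux_max=0.281, seuil_min=1.4, seuil_max=2.0, smic_proratise=1500 and an
# assiette of 2400 (1.6 SMIC):
#     ratio = 1500 / 2400 = 0.625
#     0.281 * min(1, max(2.0 * 1.4 * 0.625 - 1.4, 0) / (2.0 - 1.4)) ≈ 0.1639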
def exoneration_relative_year(period, other_date):
return (datetime64(period.start) + timedelta64(1, 'D') - other_date).astype('timedelta64[Y]')
|
agpl-3.0
| 2,386,070,103,312,394,000
| 47.772
| 120
| 0.672517
| false
| 2.926437
| false
| false
| false
|
Niddel/magnet-api2-sdk-python
|
magnetsdk2/cef.py
|
1
|
4911
|
# -*- coding: utf-8 -*-
"""
This module implements writing CEF format events.
"""
from math import ceil, trunc
import six
from magnetsdk2.time import seconds_from_UTC_epoch
def escape_header_entry(x):
"""
Escapes backslashes and pipes from a header entry.
:param x: the string value to escape
:return: escaped and trimmed UTF-8 encoded str / bytes
"""
if not isinstance(x, six.string_types):
x = x.__str__()
return x.replace('\\', '\\\\').replace('|', '\\|').strip()
def header(device_vendor, device_product, device_version, signature_id, name, severity):
"""
Builds a CEF version 0 header with the given fields
:return: escaped and trimmed UTF-8 encoded str / bytes
"""
if isinstance(severity, float):
severity = trunc(severity)
if isinstance(severity, six.integer_types):
if severity < 0 or severity > 10:
raise ValueError('severity must be between 0 and 10')
severity = '{0:d}'.format(severity)
return '|'.join(map(escape_header_entry,
['CEF:0', device_vendor, device_product, device_version, signature_id, name,
severity, ''])).strip()
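# Illustrative example (not from the test suite): with simple arguments the
# header renders as a pipe-delimited CEF v0 prefix; pipes inside values would
# be escaped by escape_header_entry:
#     header('Niddel', 'Magnet', '1.0', 'infected_outbound', 'Alert', 5)
#         -> 'CEF:0|Niddel|Magnet|1.0|infected_outbound|Alert|5|'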
def escape_extension_value(x):
"""
Escapes backslashes, pipes, equals signs and newlines from an extension entry value.
:param x: the string value to escape
:return: escaped and trimmed UTF-8 encoded str / bytes
"""
if not isinstance(x, six.string_types):
x = x.__str__()
return x.replace('\\', '\\\\').replace('=', '\\=').replace('\n', '\\n').replace('\r',
'\\r').strip()
def extension(fields):
"""
Builds a CEF version 0 extension with the given fields. Fields will be sorted by name.
:param fields: dict containing fields to include
:return: escaped and trimmed UTF-8 encoded str / bytes
"""
fields = sorted([(k, v) for k, v in six.iteritems(fields) if v], key=lambda x: x[0])
return ' '.join([e[0].strip() + '=' + escape_extension_value(e[1])
for e in fields]).strip()
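# Illustrative example (not from the test suite): falsy values are dropped,
# keys are sorted, and '=' inside values is escaped:
#     extension({'src': '10.0.0.1', 'msg': 'key=value', 'dpt': None})
#         -> 'msg=key\=value src=10.0.0.1'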
def timestamp(ts):
"""
Converts an ISO date and time in UTC into milliseconds from epoch as expected by CEF format.
:param ts: string containing the date and time in ISO 8601 format
:return: number of milliseconds since epoch
"""
if not ts:
return None
if not ts.endswith('Z'):
ts = ts + 'Z'
return '{0:d}'.format(trunc(seconds_from_UTC_epoch(ts) * 1000))
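# Illustrative example, assuming seconds_from_UTC_epoch('1970-01-01T00:00:01Z')
# returns 1.0:
#     timestamp('1970-01-01T00:00:01')  ->  '1000'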
def convert_alert_cef(obj, alert, organization):
"""
Converts a Niddel Magnet v2 API alert into an approximate CEF version 0 representation.
:param obj: file-like object in binary mode to write to
:param alert: dict containing a Niddel Magnet v2 API
:return: an str / bytes object containing a CEF event
"""
obj.write(header(device_vendor='Niddel', device_product='Magnet', device_version='1.0',
signature_id='infected_outbound',
name='Potentially Infected or Compromised Endpoint',
severity=max(ceil(alert['confidence'] / 10), 0)).encode('UTF-8'))
ext = {
'cs1': organization,
'cs1Label': 'organizationId',
'cs2': alert['batchDate'],
'cs2Label': 'batchDate',
'start': timestamp(alert['logDate'] + 'T' + alert['aggFirst']),
'end': timestamp(alert['logDate'] + 'T' + alert['aggLast']),
'externalId': alert['id'],
'cfp1': alert['confidence'],
'cfp1Label': 'confidence',
'cnt': alert['aggCount'],
'shost': alert.get('netSrcIpRdomain', None),
'src': alert.get('netSrcIp', None),
'dst': alert.get('netDstIp', None),
'dhost': alert.get('netDstDomain', None),
'dpt': alert.get('netDstPort', None),
'proto': alert.get('netL4proto', None),
'app': alert.get('netL7proto', alert.get('netApp', None)),
'suid': alert.get('netSrcUser', None),
'deviceCustomDate1': timestamp(alert.get('createdAt', None)),
'deviceCustomDate1Label': 'createdAt',
'deviceCustomDate2': timestamp(alert.get('updatedAt', None)),
'deviceCustomDate2Label': 'updatedAt',
'deviceDirection': 1,
'dtz': 'GMT'
}
if 'netBlocked' in alert:
if alert['netBlocked']:
ext['act'] = 'allow'
else:
ext['act'] = 'deny'
if 'tags' in alert:
ext['cs3'] = ','.join(sorted(alert['tags']))
ext['cs3Label'] = 'tags'
if 'netDeviceTypes' in alert:
ext['cs4'] = ','.join(sorted(alert['netDeviceTypes']))
ext['cs4Label'] = 'netDeviceTypes'
if 'netSrcProcessId' in alert:
ext['cs5'] = alert['netSrcProcessId']
ext['cs5Label'] = 'netSrcProcessId'
    # write the extension right after the header emitted above
obj.write(extension(ext).encode('UTF-8'))
|
apache-2.0
| -9,034,116,061,246,250,000
| 35.377778
| 100
| 0.589086
| false
| 3.789352
| false
| false
| false
|
pombredanne/metamorphosys-desktop
|
metamorphosys/META/3rdParty/ctemplate-1.0/src/htmlparser/generate_fsm.py
|
1
|
10998
|
#!/usr/bin/env python
#
# Copyright (c) 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---
#
# Generate a C include file from a finite state machine definition.
#
# Right now the form is the one expected by htmlparser.c so this file is pretty
# tightly coupled with htmlparser.c.
#
__author__ = 'falmeida@google.com (Filipe Almeida)'
import sys
from fsm_config import FSMConfig
class FSMGenerateAbstract(object):
def __init__(self, config):
self._config = config
def Generate(self):
"""Returns the generated FSM description for the specified language.
Raises a TypeError, because abstract methods can not be called.
Raises:
TypeError
"""
    raise TypeError('Abstract method %s.Generate called' %
                    self.__class__.__name__)
class FSMGenerateC(FSMGenerateAbstract):
"""Generate the C definition from a statemachien configuration object."""
TABSTOP_ = 2
def _Prefix(self):
"""Return a c declaration prefix."""
return self._config.name.lower() + '_'
def _StateInternalC(self, st):
"""Return the internal name of the state."""
return '%sSTATE_INT_%s' % (self._Prefix().upper(), st.upper())
def _StateExternalC(self, st):
"""Return the external name of the state."""
return '%sSTATE_%s' % (self._Prefix().upper(), st.upper())
def _MakeTuple(self, data):
"""Converts data to a string representation of a C tuple."""
return '{ %s }' % ', '.join(data)
def _CreateHeader(self):
"""Print the include file header."""
out = []
if self._config.comment:
out.append('/* ' + self._config.comment)
else:
out.append('/* State machine definition for ' + self._config.name)
out.append(' * Auto generated by generate_fsm.py. Please do not edit.')
out.append(' */')
return '\n'.join(out)
def _ListToIndentedString(self, list):
indented_list = [' ' + e for e in list]
return ',\n'.join(indented_list)
def _CreateEnum(self, name, data):
"""Print a c enum definition."""
return 'enum %s {\n%s\n};\n' % (name,
self._ListToIndentedString(data))
def _CreateStructList(self, name, type, data):
"""Print a c flat list.
Generic function to print list in c in the form of a struct.
Args:
name: name of the structure.
type: type of the struct.
data: contents of the struct as a list of elements
Returns:
String with the generated list.
"""
return "static const %s %s[] = {\n%s\n};\n" % (
type,
name,
self._ListToIndentedString(data))
def _CreateStatesEnum(self):
"""Print the internal states enum.
Prints an enum containing all the valid states.
Returns:
String containing a C enumeration of the states.
"""
list = [] # output list
for state in self._config.states:
list.append(self._StateInternalC(state))
return self._CreateEnum(self._Prefix() + 'state_internal_enum', list)
def _CreateStatesExternal(self):
"""Print a struct with a mapping from internal to external states."""
list = [] # output list
for state_name in self._config.states:
list.append(self._StateExternalC(
self._config.states[state_name].external_name))
return self._CreateStructList(self._Prefix() + 'states_external',
'int',
list)
def _CreateStatesInternalNames(self):
"""Return a struct mapping internal states to a strings."""
out = [] # output list
for state_name in self._config.states:
out.append('"' + state_name + '"')
return self._CreateStructList(self._Prefix() + 'states_internal_names',
'char *',
out)
def _CreateNumStates(self):
"""Print a Macro defining the number of states."""
return "#define %s_NUM_STATES %s" % (self._config.name.upper(),
str(len(self._config.states) + 1))
def _ExpandBracketExpression(self, expression):
"""Expand ranges in a regexp bracket expression.
Returns a string with the ranges in a bracket expression expanded.
The bracket expression is similar to grep(1) or regular expression bracket
expressions but it does not support the negation (^) modifier or named
character classes like [:alpha:] or [:alnum:].
    The special character class [:default:] will expand to all elements in the
    ASCII range.
For example, the expression 'a-c13A-D' will expand to 'abc13ABCD'.
Args:
expression: A regexp bracket expression. Ie: 'A-Z0-9'.
Returns:
A string with the ranges in the bracket expression expanded.
"""
def ExpandRange(start, end):
"""Return a sequence of characters between start and end.
Args:
start: first character of the sequence.
end: last character of the sequence.
Returns:
string containing the sequence of characters between start and end.
"""
return [chr(c) for c in range(ord(start), ord(end) + 1)]
def ListNext(input_list):
"""Pop the first element of a list.
Args:
input_list: python list object.
Returns:
First element of the list or None if the list is empty.
"""
if input_list:
return input_list.pop(0)
else:
return None
out = [] # List containing the output
# Special case for the character class [:default:]
if expression == '[:default:]':
out = [chr(c) for c in range(0, 255)]
return ''.join(out)
    chars = [c for c in expression]  # list of characters in the expression.
current = ListNext(chars)
while current:
next = ListNext(chars)
if next == '-':
next = ListNext(chars)
if next:
out.extend(ExpandRange(current, next))
else:
out.append(current)
out.append('-')
current = ListNext(chars)
else:
out.append(current)
current = next
return ''.join(out)
def _CreateTransitionTable(self):
"""Print the state transition list.
Returns a set of C structures that define the transition table for the state
machine. This structure is a list of lists of ints (int **). The outer list
indexes the source state and the inner list contains the destination state
for each of the possible input characters:
const int * const* transitions[source][input] == destination.
The conditions are mapped from the conditions variable.
Returns:
String containing the generated transition table in a C struct.
"""
out = [] # output list
default_state = 'STATEMACHINE_ERROR'
state_table = {}
for state in self._config.states:
state_table[state] = [default_state for col in xrange(255)]
# We process the transition in reverse order while updating the table.
for i_transition in range(len(self._config.transitions) - 1, -1, -1):
transition = self._config.transitions[i_transition]
(condition_name, src, dst) = (transition.condition,
transition.source,
transition.destination)
condition = self._config.conditions[condition_name]
char_list = self._ExpandBracketExpression(condition)
for c in char_list:
state_table[src][ord(c)] = self._StateInternalC(dst)
# Create the inner lists which map input characters to destination states.
for state in self._config.states:
transition_row = []
for c in xrange(0, 255):
transition_row.append(' /* %06s */ %s' % (repr(chr(c)),
state_table[state][c]))
out.append(self._CreateStructList('%stransition_row_%s' %
(self._Prefix(),
state),
'int',
transition_row))
out.append('\n')
# Create the outer list, which map source states to input characters.
out.append('static const %s %s[] = {\n' % ('int *', self._Prefix() +
'state_transitions'))
row_list = [' %stransition_row_%s' %
(self._Prefix(), row) for row in self._config.states]
out.append(',\n'.join(row_list))
out.append('\n};\n')
return ''.join(out)
def Generate(self):
"""Returns the generated the C include statements for the statemachine."""
print '\n'.join((self._CreateHeader(),
self._CreateNumStates(),
self._CreateStatesEnum(),
self._CreateStatesExternal(),
self._CreateStatesInternalNames(),
self._CreateTransitionTable()))
def main():
if len(sys.argv) != 2:
print "usage: generate_fsm.py config_file"
sys.exit(1)
config = FSMConfig()
config.Load(sys.argv[1])
gen = FSMGenerateC(config)
gen.Generate()
if __name__ == "__main__":
main()
|
mit
| -816,260,982,260,084,900
| 31.327273
| 80
| 0.600382
| false
| 4.301134
| true
| false
| false
|
cameronbwhite/PyOLP
|
PyOLP/paginated.py
|
1
|
3928
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2013, Cameron White
#
# PyGithub is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
class PaginatedList():
def __init__(self, contentClass, requester, uri, parameters=None):
self.__requester = requester
self.__contentClass = contentClass
self.__uri = uri
self.__parameters = parameters
self.__getFirstPage()
def _applyContentClass(self, element):
return self.__contentClass(
self.__requester, self.__headers, element)
def _isBiggerThan(self, index):
return len(self.__elements) > index or self.__couldGrow()
def __couldGrow(self):
if self.__next is None:
return False
else:
return True
def __fetchToIndex(self, index):
while len(self.__elements) <= index and self.__couldGrow():
self.__grow()
def __getFirstPage(self):
headers, data = self.__requester.requestJsonAndCheck(
self.__uri,
self.__parameters
)
self.__elements = self.__parse(headers, data)
def __getitem__(self, index):
assert isinstance(index, (int, slice))
if isinstance(index, (int, long)):
self.__fetchToIndex(index)
return self.__elements[index]
else:
return self._Slice(self, index)
def __getNextPage(self):
headers, data = self.__requester.requestJsonAndCheck(
self.__next
)
return self.__parse(headers, data)
def __grow(self):
newElements = self.__getNextPage()
self.__elements += newElements
return newElements
def __iter__(self):
for element in self.__elements:
yield self.__contentClass(
self.__requester, self.__headers, element)
while self.__couldGrow():
self.__grow()
for element in self.__elements:
yield self._applyContentClass(element)
def __parse(self, headers, data):
self.__headers = headers
meta = data["meta"]
self.__limit = meta["limit"]
self.__next = meta["next"]
self.__offset = meta["offset"]
self.__previous = meta["previous"]
self.__total_count = meta["total_count"]
return data["objects"]
class _Slice:
def __init__(self, theList, theSlice):
self.__list = theList
self.__start = theSlice.start or 0
self.__stop = theSlice.stop
self.__step = theSlice.step or 1
def __iter__(self):
index = self.__start
while not self.__finished(index):
if self.__list._isBiggerThan(index):
yield self.__list._applyContentClass(self.__list[index])
index += self.__step
else:
return
def __finished(self, index):
return self.__stop is not None and index >= self.__stop
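# Rough usage sketch (hypothetical names: `Product` and `requester` stand in
# for a resource class and a configured requester object; they are not part of
# this module):
#
#     products = PaginatedList(Product, requester, "/api/v1/product/")
#     first_raw = products[0]            # pages are fetched lazily as needed
#     for product in products[0:10:2]:   # slicing yields wrapped resources
#         print(product)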
|
gpl-3.0
| 1,788,661,924,577,692,200
| 33.156522
| 79
| 0.532332
| false
| 4.743961
| false
| false
| false
|
sea-kg/inventory-files
|
contrib/ppa/build_source_pkg_for_ppa.py
|
1
|
6043
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import fileinput
import re
import subprocess
import os
import sys
import shutil
from pprint import pprint
import datetime
print ("Welcome to preapre ppa package...")
dists = []
# https://wiki.ubuntu.com/Releases
dists.append({
"name": "Ubuntu 16.04.7 LTS (xenial)",
"dist_name": "xenial",
"ppa_name_suffix": "ppa-ubuntu-16-04-xenial-1",
"end": "April 2021",
"version": "16.04.7 LTS"
})
dists.append({
"name": "Ubuntu 18.04.5 LTS (bionic)",
"dist_name": "bionic",
"ppa_name_suffix": "ppa-ubuntu-18-04-bionic-2",
"end": "April 2023",
"version": "18.04.5 LTS"
})
dists.append({
"name": "Ubuntu 20.04.2 LTS (focal)",
"dist_name": "focal",
"ppa_name_suffix": "ppa-ubuntu-20-04-focal-2",
"end": "April 2025",
"version": "20.04.2 LTS"
})
dists.append({
"name": "Ubuntu 20.10 (groovy)",
"dist_name": "groovy",
"ppa_name_suffix": "ppa-ubuntu-20-10-groovy-1",
"end": "July 2021",
"version": "20.10"
})
print("Please choose dist name:")
i = 0
for d in dists:
print(' ' + str(i) + '. ' + d['dist_name'] + ' (' + d['version'] + '), date end: ' + d['end'])
i = i + 1
dist_num_ = input("Enter number of dist: ")
dist_num_ = int(dist_num_)
if dist_num_ >= len(dists):
sys.exit("Wrong dist number")
dist_name_ = dists[dist_num_]['dist_name']
ppa_name_ = dists[dist_num_]['ppa_name_suffix']
print("Dist Name: " + dist_name_)
#############################################
def clear_all():
print( " -> Clear all")
if os.path.exists('./inventory-files'):
shutil.rmtree('./inventory-files')
print( " -> DONE")
print( " -> Cleanup previous ppa packages")
onlyfiles = [f for f in os.listdir('./') if os.path.isfile(os.path.join('./', f))]
for f in onlyfiles:
m = re.search(r'^inventory-files_(\d+\.\d+\.\d+)-ppa-.*(\.orig\.tar\.gz|source\.changes|_source\.build|_source.ppa.upload|\.tar\.gz|_source\.buildinfo|\.dsc)$', f)
if m:
print('Remove file ' + f)
os.remove(f)
clear_all()
print( " -> Prepare sources directory ")
os.mkdir('./inventory-files')
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
shutil.copytree('../../src', './inventory-files/src', symlinks=False, ignore=None)
shutil.copytree('../../res', './inventory-files/res', symlinks=False, ignore=None)
shutil.copy2('../../inventory-files.pro', './inventory-files/inventory-files.pro')
shutil.copy2('../../inventory-files.qrc', './inventory-files/inventory-files.qrc')
shutil.copy2('../../VERSION', './inventory-files/VERSION')
shutil.copy2('../../LICENSE', './inventory-files/LICENSE')
shutil.copytree('./debian', './inventory-files/debian', symlinks=False, ignore=None)
shutil.copytree('./install-files', './inventory-files/install-files', symlinks=False, ignore=None)
print( " -> DONE ")
#############################################
print( " -> Read version of package ")
f = open("../../VERSION",'r')
filedata = f.read()
f.close()
print(filedata)
m = re.search(r'(\d+\.\d+\.\d+)', filedata)
if m:
    current_version = m.group(1)
else:
    sys.exit("Could not find a version number in ../../VERSION")
print("\n *** Current version: " + current_version + "\n")
# parse CHANGELOG.md
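# The regex below expects headings such as (the label in parentheses is free text):
#   ## [v0.3.1] - 2021-03-22 (stable)
# followed by "- ..." bullet lines describing the changes of that version.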
changelog_list = []
version_logs = {'version': '', 'dt': '', 'logs': []}
lines = [line.rstrip('\n') for line in open('../../CHANGELOG.md')]
for li in lines:
m = re.search(r'[ ]*##[ ]+\[v(\d+\.\d+\.\d+)\][ ]*-[ ]*(\d+)-(\d+)-(\d+)[ ]*\((.*)\).*', li)
if m:
if version_logs['version'] != '':
changelog_list.append(version_logs)
version_logs = {'version': '', 'dt': '', 'logs': []}
ver = m.group(1)
year = int(m.group(2))
month = int(m.group(3))
day = int(m.group(4))
_dt = datetime.date(year, month, day)
# must be format Mon, 22 Mar 2010 00:37:31 +0100
dt = _dt.strftime("%a, %d %b %Y %H:%M:%S +0700")
version_logs['version'] = ver
version_logs['dt'] = dt
if version_logs['version'] == '':
continue
m = re.search('[ ]*-[ ]*(.*)', li)
if m:
line_log = m.group(1)
version_logs['logs'].append(line_log)
if version_logs['version'] != '':
changelog_list.append(version_logs)
version_logs = {'version': '', 'dt': '', 'logs': []}
print(version_logs)
#############################################
print( " -> Prepare changelog ")
changelog="./inventory-files/debian/changelog"
f = open(changelog,'w')
li_count = 0
for li in changelog_list:
if li_count != 0:
f.write("\n")
f.write("\n")
li_count = li_count + 1
f.write("inventory-files (" + li['version'] + "-" + ppa_name_ + ") " + dist_name_ + "; urgency=low\n\n")
for li_log in li['logs']:
li_log = li_log.strip()
if li_log != '':
f.write(" * " + li_log + "\n")
f.write("\n")
#if li['dt'] == '?':
# li['dt'] = subprocess.Popen(['date', '-R'], stdout=subprocess.PIPE).communicate()[0]
f.write(" -- Evgenii Sopov <mrseakg@gmail.com> " + li['dt']) # 2 space!!!
f.write("\n")
f.close()
print( " -> DONE ")
# TODO
# subprocess.call("./clean_sources_ppa.sh")
#############################################
print( " -> Prepare tar.gz source package ")
os.system("cd ./ && tar -acf inventory-files_" + current_version + "-" + ppa_name_ + ".orig.tar.gz inventory-files")
os.system("cd ./inventory-files && debuild -S -sa")
print( " -> DONE ")
dput_filename = "inventory-files_" + current_version + "-" + ppa_name_ + "_source.changes"
os.system("debsign -k 3AA3105C5766233DD2F243A3A742BE2E628592AC " + dput_filename)
sys.stdout.write("Are you want try upload source package to ppa.launchpad? [y/n]: ")
ask_upload_ = input().lower()
if ask_upload_ == "y":
os.system("dput ppa:sea5kg/inventory-files " + dput_filename)
|
mit
| 3,938,819,940,711,231,500
| 28.478049
| 171
| 0.557836
| false
| 2.965162
| false
| false
| false
|
Kaushikpatnaik/LSTM-Encoder-for-Driver-Telematics
|
dataProcess.py
|
1
|
7823
|
import numpy as np
import os
import random
from collections import defaultdict, Counter
from sklearn.utils import resample, shuffle
import math
def createFeatures(dataWin1, dataWin2, dataWin3):
    # given three raw data windows compute velocity, acceleration
# and change in direction
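    # the returned vector holds 15 features:
    #   min/max/avg velocity (x, y), min/max/avg heading angle,
    #   min/max/avg acceleration (x, y)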
vecData = np.array(np.subtract(dataWin2, dataWin1))
vecData2 = np.array(np.subtract(dataWin3, dataWin2))
accData = np.subtract(vecData2, vecData)
dirData = np.arctan(np.divide(dataWin2[1],dataWin2[0]))
minVecX, minVecY = np.amin(vecData, axis=0)
maxVecX, maxVecY = np.amax(vecData, axis=0)
avgVecX, avgVecY = np.average(vecData, axis=0)
minAccX, minAccY = np.amin(accData, axis=0)
maxAccX, maxAccY = np.amax(accData, axis=0)
avgAccX, avgAccY = np.average(accData, axis=0)
minDir = np.amin(dirData, axis=0)
maxDir = np.amax(dirData, axis=0)
avgDir = np.average(dirData, axis=0)
featVector = [minVecX, minVecY, maxVecX, maxVecY, avgVecX, avgVecY, minDir, maxDir, avgDir, minAccX, minAccY, maxAccX, maxAccY, avgAccX, avgAccY]
return featVector
def getData(allpath):
# function which given a filepath returns seqID a list of sequence IDs
# and dataFile a numpy array containing the features
dataFile = []
seqID = []
filepath = []
for dirs, subdir, files in os.walk(allpath):
for ifile in files:
filepath.append(dirs + "/" + ifile)
for path in filepath:
s = path.split("/")
data = []
with open(path,"r") as filename:
count = 0
countSeq = 1
temp_collec = []
for line in filename:
a,b = line.split(",")
data.append([a,b[0:-1]])
i = 2
#round off the trip length to the nearest 200
rng = int(np.floor((len(data)-6)/200)*200)
while i<rng:
dataWin1 = np.array(data[i-1:i+3], dtype=float)
dataWin2 = np.array(data[i:i+4], dtype=float)
dataWin3 = np.array(data[i+1:i+5], dtype=float)
temp = createFeatures(dataWin1, dataWin2, dataWin3)
#convert all "nan's" and zeros to small values
for k in range(len(temp)):
if math.isnan(temp[k]):
temp[k] = 0.00001
temp_collec.append(temp[k])
count += 1
if count == 50:
#print len(temp_collec)
dataFile.append(temp_collec)
temp = s[3].split(".")
seqID.append(s[2]+"-"+temp[0]+"-"+str(countSeq))
temp_collec = []
countSeq += 1
count = 0
i += 4
dataFile = np.array(dataFile)
seqID = np.array(seqID)
returnVal = [seqID, dataFile]
return returnVal
if __name__ == "__main__":
ROOTDIR = "./"
subdirpath = []
# read through the directory to obtain subdirectory for each driver
for dirs, subdirs, files in os.walk(ROOTDIR+"data"):
for subdir in subdirs:
subdirpath.append(dirs+"/"+subdir)
    # for each driver, we also collect data from one randomly chosen other
    # driver to serve as false trips
driver_collec = defaultdict(list)
for subdir in subdirpath:
s = subdir.split('/')
driver_collec[s[2]].append(subdir)
for j in range(1):
#draw a random choice
temp = random.choice(subdirpath)
if temp != subdir:
driver_collec[s[2]].append(temp)
# for each key of the dictionary we generate a csv file
for key in driver_collec.keys():
filepath = []
values = driver_collec[key]
print "Creating file for driver: " + str(key)
# get data for the driver
[dSeqID, dData] = getData(values[0])
# get data for other drivers
[oSeqID, oData] = getData(values[1])
'''
k = 2
while k < len(values[2:]):
[temp1, temp2] = getData(values[k])
#print temp1.shape, temp2.shape
#print oSeqID.shape, oData.shape
oSeqID = np.hstack((oSeqID, temp1))
oData = np.vstack((oData, temp2))
k += 1
'''
print oData.shape, dData.shape
print "Resampling Data"
if oData.shape[0] > dData.shape[0]:
row = dData.shape[0]
trow = oData.shape[0]
# resample data with replacement
while row < (trow-row):
temp1, temp2 = resample(dData, dSeqID, n_samples = row, random_state = 0)
#print temp1.shape, temp2.shape
#print dSeqID.shape, dData.shape
dSeqID = np.hstack((dSeqID, temp2))
dData = np.vstack((dData, temp1))
row += row
diff = trow - row
temp1, temp2 = resample(dData, dSeqID, n_samples = diff, random_state = 0)
dSeqID = np.hstack((dSeqID, temp2))
dData = np.vstack((dData, temp1))
else:
row = oData.shape[0]
trow = dData.shape[0]
# resample data with replacement
while row < (trow-row):
temp1, temp2 = resample(oData, oSeqID, n_samples = row, random_state = 0)
#print temp1.shape, temp2.shape
#print dSeqID.shape, dData.shape
oSeqID = np.hstack((oSeqID, temp2))
oData = np.vstack((oData, temp1))
row += row
diff = trow - row
temp1, temp2 = resample(oData, oSeqID, n_samples = diff, random_state = 0)
oSeqID = np.hstack((oSeqID, temp2))
oData = np.vstack((oData, temp1))
print oData.shape, dData.shape
print dSeqID.shape, oSeqID.shape
# append data
seqID = np.hstack((dSeqID, oSeqID))
data = np.vstack((dData, oData))
print "Shuffling Data"
# shuffle
seqID, data = shuffle(seqID, data, random_state = 0)
row, col = data.shape
print "Created Dataset in desired format"
# write to file
with open(ROOTDIR+"proc_data/datafile_"+str(key)+".csv","w") as filename:
for i in range(row):
writedata = data[i]
newwritedata = np.reshape(writedata, (50,15))
for j in range(50):
for k in range(14):
filename.write(str(newwritedata[j][k]))
filename.write(",")
filename.write(str(newwritedata[j][14]))
filename.write("\n")
# since class names are not unique, create a dictionary of names and save it also
with open(ROOTDIR+"proc_data/classfile_"+str(key)+".csv","w") as filename:
for i in range(row):
temp = seqID[i].split("-")
#print temp[0], str(key), temp[0] == str(key)
for k in range(50):
writedata = temp[0]
if writedata == str(key):
filename.write(str(1))
else:
filename.write(str(2))
filename.write("\n")
# write out the mapping
with open(ROOTDIR+"proc_data/classmap_"+str(key)+".csv","w") as filename:
for i in range(row):
writedata = seqID[i]
filename.write(writedata)
filename.write("\n")
|
mit
| -992,711,337,174,148,200
| 34.721461
| 149
| 0.51208
| false
| 3.793889
| false
| false
| false
|
zorna/zorna
|
zorna/site/templatetags/site_tags.py
|
1
|
1112
|
from django.template import TemplateSyntaxError
from django import template
register = template.Library()
from zorna.site.models import SiteOptions
class check_if_has_access_to_option_node(template.Node):
def __init__(self, key, var_name):
self.var_name = var_name
if not (key[0] == key[-1] and key[0] in ('"', "'")):
self.key = key
else:
self.key = key[1:-1]
def render(self, context):
request = context['request']
try:
context[self.var_name] = SiteOptions.objects.is_access_valid(
request.user, self.key)
except:
pass
return ''
@register.tag(name="check_if_has_access_to")
def check_if_has_access_to_option(parser, token):
bits = token.split_contents()
if 4 != len(bits):
raise TemplateSyntaxError('%r expects 4 arguments' % bits[0])
if bits[-2] != 'as':
raise TemplateSyntaxError(
'%r expects "as" as the second argument' % bits[0])
key = bits[1]
varname = bits[-1]
return check_if_has_access_to_option_node(key, varname)
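# Template usage (illustrative key and variable names):
#   {% check_if_has_access_to "some_option_key" as has_access %}
#   {% if has_access %} ... {% endif %}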
|
bsd-3-clause
| -8,295,376,805,729,403,000
| 29.054054
| 73
| 0.598921
| false
| 3.61039
| false
| false
| false
|
EricssonResearch/calvin-base
|
calvin/runtime/south/transports/lib/twisted/base_transport.py
|
1
|
4660
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.utilities.calvin_callback import CalvinCBClass
from calvin.utilities import calvinlogger
_log = calvinlogger.get_logger(__name__)
class CalvinServerBase(CalvinCBClass):
"""
        BaseServerClass for implementing calvinip servers in different frameworks
Callbacks in the API
self._callback_execute('server_started', port)
            Called when the server has started listening on the port
            port is an integer port number of the listening server
        self._callback_execute('server_stopped')
            Called when the server has stopped listening on the port
self._callback_execute('client_connected', uri, proto)
Called when a client is connected
            uri is the URI that the client connected from, for example "calvinip://127.0.0.1:78445"
            proto is the protocol to be sent to the CalvinTransportBase, can be None
"""
def __init__(self, iface='', port=0, callbacks=None, *args, **kwargs):
"""
iface The interface to listen on defaults to all
port The port to listen on defaults to system generated
This port should be returned in the server_started callback
callbacks The callbacks subscribed on this class
"""
super(CalvinServerBase, self).__init__(callbacks, callback_valid_names=['server_started',
'server_stopped',
'client_connected'])
def start(self):
"""
Called when the server transport is started
"""
raise NotImplementedError()
def stop(self):
"""
Called when the server transport is stopped
"""
raise NotImplementedError()
def is_listening(self):
"""
returns if the server is listening
"""
raise NotImplementedError()
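# Hypothetical subclass sketch (illustrative only) showing when the callbacks
# documented above are expected to fire; a real implementation would wrap an
# actual framework listener:
#
#   class DummyServer(CalvinServerBase):
#       def start(self):
#           self._port = 5000
#           self._callback_execute('server_started', self._port)
#       def stop(self):
#           self._callback_execute('server_stopped')
#           self._port = None
#       def is_listening(self):
#           return getattr(self, '_port', None) is not None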
class CalvinTransportBase(CalvinCBClass):
"""
    BaseTransport for implementing calvinip transports in different frameworks
self._callback_execute('connected')
Called when the client is connected
self._callback_execute('disconnected', reason)
Called when the client disconnects
        reason is a string describing the reason for disconnecting
(normal, error, ..)
self._callback_execute('connection_failed', reason)
Called when the connection fails
        reason is a string describing the reason for disconnecting
(normal, error, ..)
self._callback_execute('data', data)
Called when we have raw data in the transport.
Always an entire package
"""
def __init__(self, host, port, callbacks=None, proto=None, *args, **kwargs):
"""
host The host address of the client
port The port to connect to, 0 means system allocated
callback callbacks is a set of callbacks that the client wants
proto Can be sent in here if its a connecting client from a server instance
"""
self._rtt = None
super(CalvinTransportBase, self).__init__(callbacks, callback_valid_names=['connected', 'disconnected', 'connection_failed', 'data'])
def is_connected(self):
"""
returns True if the transport is connected
"""
raise NotImplementedError()
def disconnect(self):
"""
Used for disconnecting the client
"""
raise NotImplementedError()
def send(self, data):
"""
Used for sending data to the client
data Is raw data one package
"""
raise NotImplementedError()
def join(self):
"""
Called when the client should connect
"""
raise NotImplementedError()
def get_rtt(self):
return self._rtt
|
apache-2.0
| 6,143,106,125,151,566,000
| 35.692913
| 141
| 0.600858
| false
| 5.04329
| false
| false
| false
|
ToonTownInfiniteRepo/ToontownInfinite
|
toontown/building/DistributedBuildingMgrAI.py
|
1
|
9120
|
from direct.directnotify.DirectNotifyGlobal import *
from otp.ai.AIBaseGlobal import *
from toontown.building import DistributedBuildingAI
from toontown.building import GagshopBuildingAI
from toontown.building import HQBuildingAI
from toontown.building import KartShopBuildingAI
from toontown.building import PetshopBuildingAI
from toontown.hood import ZoneUtil
import time  # used by newBuilding() for the becameSuitTime timestamp default
# from toontown.building import DistributedAnimBuildingAI
class DistributedBuildingMgrAI:
notify = directNotify.newCategory('DistributedBuildingMgrAI')
def __init__(self, air, branchId, dnaStore, trophyMgr):
self.air = air
self.branchId = branchId
self.canonicalBranchId = ZoneUtil.getCanonicalZoneId(self.branchId)
self.dnaStore = dnaStore
self.trophyMgr = trophyMgr
self.__buildings = {}
self.findAllLandmarkBuildings()
def cleanup(self):
for building in self.__buildings.values():
building.cleanup()
self.__buildings = {}
def isValidBlockNumber(self, blockNumber):
return blockNumber in self.__buildings
def isSuitBlock(self, blockNumber):
if not self.isValidBlockNumber(blockNumber):
return False
return self.__buildings[blockNumber].isSuitBlock()
def getSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getEstablishedSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isEstablishedSuitBlock():
blocks.append(blockNumber)
return blocks
def getToonBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if isinstance(building, HQBuildingAI.HQBuildingAI):
continue
if isinstance(building, GagshopBuildingAI.GagshopBuildingAI):
continue
if isinstance(building, PetshopBuildingAI.PetshopBuildingAI):
continue
if isinstance(building, KartShopBuildingAI.KartShopBuildingAI):
continue
if not building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getBuildings(self):
return self.__buildings.values()
def getFrontDoorPoint(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].getFrontDoorPoint()
def getBuildingTrack(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].track
def getBuilding(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber]
def setFrontDoorPoint(self, blockNumber, point):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].setFrontDoorPoint(point)
def getDNABlockLists(self):
blocks = []
hqBlocks = []
gagshopBlocks = []
petshopBlocks = []
kartshopBlocks = []
animBldgBlocks = []
for i in xrange(self.dnaStore.getNumBlockNumbers()):
blockNumber = self.dnaStore.getBlockNumberAt(i)
buildingType = self.dnaStore.getBlockBuildingType(blockNumber)
if buildingType == 'hq':
hqBlocks.append(blockNumber)
elif buildingType == 'gagshop':
gagshopBlocks.append(blockNumber)
elif buildingType == 'petshop':
petshopBlocks.append(blockNumber)
elif buildingType == 'kartshop':
kartshopBlocks.append(blockNumber)
elif buildingType == 'animbldg':
animBldgBlocks.append(blockNumber)
else:
blocks.append(blockNumber)
return (blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
animBldgBlocks)
def findAllLandmarkBuildings(self):
backups = simbase.backups.load('blockinfo', (self.air.districtId, self.branchId), default={})
(blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
animBldgBlocks) = self.getDNABlockLists()
for blockNumber in blocks:
self.newBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in animBldgBlocks:
self.newAnimBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in hqBlocks:
self.newHQBuilding(blockNumber)
for blockNumber in gagshopBlocks:
self.newGagshopBuilding(blockNumber)
for block in petshopBlocks:
self.newPetshopBuilding(block)
for block in kartshopBlocks:
self.newKartShopBuilding(block)
def newBuilding(self, blockNumber, backup=None):
building = DistributedBuildingAI.DistributedBuildingAI(
self.air, blockNumber, self.branchId, self.trophyMgr)
building.generateWithRequired(self.branchId)
if backup is not None:
state = backup.get('state', 'toon')
if ((state == 'suit') and simbase.air.wantCogbuildings) or (
(state == 'cogdo') and simbase.air.wantCogdominiums):
building.track = backup.get('track', 'c')
building.difficulty = backup.get('difficulty', 1)
building.numFloors = backup.get('numFloors', 1)
building.updateSavedBy(backup.get('savedBy'))
building.becameSuitTime = backup.get('becameSuitTime', time.mktime(time.gmtime()))
if (state == 'suit') and simbase.air.wantCogbuildings:
building.setState('suit')
elif (state == 'cogdo') and simbase.air.wantCogdominiums:
building.setState('cogdo')
else:
building.setState('toon')
else:
building.setState('toon')
else:
building.setState('toon')
self.__buildings[blockNumber] = building
return building
def newAnimBuilding(self, blockNumber, backup=None):
return self.newBuilding(blockNumber, backup=backup)
def newHQBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = HQBuildingAI.HQBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newGagshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = GagshopBuildingAI.GagshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newPetshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = PetshopBuildingAI.PetshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newKartShopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = KartShopBuildingAI.KartShopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def save(self):
buildings = {}
for blockNumber in self.getSuitBlocks():
building = self.getBuilding(blockNumber)
backup = {
'state': building.fsm.getCurrentState().getName(),
'block': building.block,
'track': building.track,
'difficulty': building.difficulty,
'numFloors': building.numFloors,
'savedBy': building.savedBy,
'becameSuitTime': building.becameSuitTime
}
buildings[blockNumber] = backup
simbase.backups.save('blockinfo', (self.air.districtId, self.branchId), buildings)
|
mit
| 4,311,047,661,063,541,000
| 42.428571
| 101
| 0.651645
| false
| 4.075067
| false
| false
| false
|
fernandolobato/balarco
|
works/serializers.py
|
1
|
4427
|
from rest_framework import serializers
from . import models
from clients import serializers as client_serializers
from users import serializers as user_serializers
class WorkTypeSerializer(serializers.ModelSerializer):
name = serializers.CharField(read_only=True)
class Meta:
model = models.WorkType
fields = ('id', 'work_type_id', 'name',)
class ArtTypeSerializer(serializers.ModelSerializer):
class Meta:
model = models.ArtType
fields = ('id', 'work_type', 'name',)
class ArtIgualaSerializer(serializers.ModelSerializer):
art_type_name = serializers.CharField(source='art_type.name', read_only=True)
class Meta:
model = models.ArtIguala
fields = ('id', 'iguala', 'art_type', 'quantity', 'art_type_name')
class IgualaSerializer(serializers.ModelSerializer):
client_complete = client_serializers.ClientSerializer(source='client', read_only=True)
art_iguala = ArtIgualaSerializer(many=True, read_only=True)
class Meta:
model = models.Iguala
fields = ('id', 'client', 'client_complete', 'name', 'start_date', 'end_date',
'art_iguala',)
class StatusSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='__str__', read_only=True)
class Meta:
model = models.Status
fields = ('id', 'status_id', 'name',)
class ArtWorkSerializer(serializers.ModelSerializer):
art_type_complete = ArtTypeSerializer(source='art_type', read_only=True)
class Meta:
model = models.ArtWork
fields = ('id', 'work', 'art_type', 'quantity', 'art_type_complete')
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = models.File
fields = ('id', 'work', 'upload',)
class WorkDesignerSerializer(serializers.ModelSerializer):
start_date = serializers.DateTimeField(read_only=True)
end_date = serializers.DateTimeField(read_only=True)
designer_name = serializers.CharField(source='designer.first_name', read_only=True)
designer_last_name = serializers.CharField(source='designer.last_name', read_only=True)
class Meta:
model = models.WorkDesigner
fields = ('id', 'designer', 'designer_name', 'designer_last_name', 'work',
'start_date', 'end_date', 'active_work',)
class StatusChangeSerializer(serializers.ModelSerializer):
date = serializers.DateTimeField(read_only=True)
class Meta:
model = models.StatusChange
fields = ('id', 'work', 'status', 'user', 'date',)
class WorkSerializer(serializers.ModelSerializer):
creation_date = serializers.DateField(read_only=True)
executive_complete = user_serializers.UserSerializer(source='executive', read_only=True)
contact_complete = client_serializers.ContactSerializer(source='contact', read_only=True)
current_status_complete = StatusSerializer(source='current_status', read_only=True)
work_type_complete = WorkTypeSerializer(source='work_type', read_only=True)
iguala_complete = IgualaSerializer(source='iguala', read_only=True)
art_works = ArtWorkSerializer(many=True, read_only=True)
files = FileSerializer(many=True, read_only=True)
work_designers = WorkDesignerSerializer(many=True, read_only=True)
status_changes = StatusChangeSerializer(many=True, read_only=True)
class Meta:
model = models.Work
fields = ('id',
'executive',
'executive_complete',
'contact',
'contact_complete',
'current_status',
'current_status_complete',
'work_type',
'work_type_complete',
'iguala',
'iguala_complete',
'creation_date',
'name',
'expected_delivery_date',
'brief',
'final_link',
'art_works',
'files',
'work_designers',
'status_changes'
)
class NotificationSerializer(serializers.ModelSerializer):
# work_complete = WorkSerializer(source='work', read_only=True)
# user_complete = user_serializers.UserSerializer(source='user', read_only=True)
class Meta:
model = models.Notification
fields = ('id', 'work', 'user', 'date', 'text', 'seen')
|
mit
| -4,358,667,984,826,051,600
| 31.313869
| 93
| 0.635871
| false
| 4.006335
| false
| false
| false
|
MarsZone/DreamLand
|
muddery/utils/readers.py
|
1
|
3270
|
"""
This module parses data files into lines.
"""
from __future__ import print_function
import csv
import codecs
try:
import xlrd
except ImportError:
xlrd = None
class DataReader(object):
"""
Game data file reader.
"""
types = None
def __init__(self, filename = None):
"""
Args:
filename: (String) data file's name.
Returns:
None
"""
self.filename = filename
def __iter__(self):
return self
def next(self):
return self.readln()
def readln(self):
"""
Read data line.
Returns:
list: data line
"""
# No data.
raise StopIteration
class CSVReader(DataReader):
"""
CSV file's reader.
"""
types = ("csv",)
def __init__(self, filename=None):
"""
Args:
filename: (String) data file's name.
Returns:
None
"""
super(CSVReader, self).__init__(filename)
self.reader = None
if filename:
csvfile = open(filename, 'r')
# test BOM
head = csvfile.read(len(codecs.BOM_UTF8))
if head != codecs.BOM_UTF8:
# read from beginning
csvfile.seek(0)
self.reader = csv.reader(csvfile)
def readln(self):
"""
Read data line.
Returns:
list: data line
"""
if not self.reader:
raise StopIteration
# Read line.
return self.reader.next()
class XLSReader(DataReader):
"""
XLS/XLSX file's reader.
"""
types = ("xls", "xlsx")
def __init__(self, filename=None):
"""
Args:
filename: (String) data file's name.
Returns:
None
"""
super(XLSReader, self).__init__(filename)
if not xlrd:
print('**********************************************************')
print('You need to install "xlrd" first to import xls/xlsx files!')
print('You can use "pip install xlrd" to install it! ')
print('**********************************************************')
return
# load file
self.sheet = None
self.row_pos = 0
if filename:
book = xlrd.open_workbook(filename)
self.sheet = book.sheet_by_index(0)
def readln(self):
"""
Read data line.
Returns:
list: data line
"""
if not self.sheet:
raise StopIteration
if self.row_pos >= self.sheet.nrows:
raise StopIteration
# Read line.
pos = self.row_pos
self.row_pos += 1
return self.sheet.row_values(pos)
all_readers = [CSVReader, XLSReader]
def get_readers():
"""
    Get all available readers.
    Returns:
        list: available readers
"""
return all_readers
reader_dict = {type: reader for reader in all_readers for type in reader.types}
def get_reader(reader_type):
"""
Get a reader by reader's type.
Args:
        reader_type: (String) reader's type.
Returns:
reader
"""
return reader_dict.get(reader_type, None)
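# Example usage (illustrative; assumes a "data.csv" file exists):
#   reader_class = get_reader("csv")
#   reader = reader_class("data.csv")
#   for row in reader:
#       print(row)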
|
bsd-3-clause
| 2,917,408,387,520,421,400
| 19.31677
| 79
| 0.486544
| false
| 4.313984
| false
| false
| false
|
jerkern/nxt_slam
|
python/sonar.py
|
1
|
4434
|
import numpy as np
import math
class FOVCone(object):
""" Represent a FOV measurment """
def __init__(self, pos, theta, prec, dist):
self.xpos = pos[0]
self.ypos = pos[1]
self.theta = theta
self.prec = prec
self.dist = dist
def __call__(self, y, x, optdata = None):
""" Check if inside FOV, if so return coordinate and angle distance
for each y,x pair. If optdata is provided then the corresponding
entries are also returned.
y,x (and opdata) must have the same length """
x = np.asarray(x)
y = np.asarray(y)
# Absolute angle, world coordinates
a_w = np.arctan2(y-self.ypos, x-self.xpos)
# Angle relative sensor, use mod to enforce interval [-pi,pi]
tmp = a_w - (self.theta - math.pi)
a = np.mod(tmp, 2*math.pi) - math.pi
ind = np.abs(a) <= self.prec/2.0
if (not ind.any()):
return None
# Throw away all pairs outside FOV
newx = x[ind]-self.xpos
newy = y[ind]-self.ypos
newa = a[ind]
newa_w = a_w[ind]
# Calculating this manually instead of using norm
# and apply_along_axis is _much_ faster
r = np.sqrt((newx ** 2) + (newy ** 2))
rind = (r <= self.dist)
cnt = newa[rind].shape[0]
if (cnt == 0):
return None
        # Create structured array containing cells within FOV
        if (optdata is not None):
rval = np.empty(cnt, dtype=[('a', float),
('r', float),
('a_w', float),
('opt', optdata.dtype,
optdata.shape[1])])
rval['opt'] = (optdata[ind, :])[rind, :]
else:
rval = np.empty([cnt, 2], dtype=[('a', float),
('r', float),
('a_w', float)])
rval['a'] = newa[rind]
rval['r'] = r[rind]
rval['a_w'] = newa_w[rind]
return rval
def get_bounding_rect(self):
""" Find bounding rectange of FOV cone """
tmp1 = self.xpos + self.dist*math.cos(self.theta+self.prec/2.0)
tmp2 = self.xpos + self.dist*math.cos(self.theta-self.prec/2.0)
xmin = np.min((self.xpos, tmp1, tmp2))
xmax = np.max((self.xpos, tmp1, tmp2))
tmp1 = self.ypos + self.dist * math.sin(self.theta + self.prec / 2.0)
tmp2 = self.ypos + self.dist * math.sin(self.theta - self.prec / 2.0)
ymin = min((self.ypos, tmp1, tmp2))
ymax = max((self.ypos, tmp1, tmp2))
return (xmin, xmax, ymin, ymax)
class SonarMeasurement(object):
""" Class for handling sonar measurements,
converts from range to probability field """
def own_pdf(self, r, mu, scale):
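        # piecewise likelihood centred on the measured range mu:
        #   |r - mu| <  scale/2         -> 0.99 (plateau)
        #   scale/2 <= |r - mu| < scale -> linear falloff from 0.99
        #   elsewhere                   -> floored at 0.01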
pdf = 0.0*r
ind1 = np.abs(r-mu) < scale / 2
ind2 = np.abs(r-mu) < scale
max = 0.99
min = 0.01
pdf[ind1] = max
interpolate_inds = (~ind1)*ind2
pdf[interpolate_inds] = max* (1 - 2*(np.abs(r[interpolate_inds] - mu) - scale/2)/scale)
pdf[pdf < min] = min
return pdf
def __init__(self, cells, dist, prec, r_coeff, a_coeff, cov_offset, num_angles, norm=True):
""" Create probabilty field evaluated in 'cells' for distance 'dist' """
tmp = (cells['a_w'] + math.pi)*num_angles/(2.0*math.pi)
tmp = np.floor(tmp)
# Wrap-around
tmp[tmp >= num_angles] = num_angles-1
tmp = np.reshape(tmp,(-1,1))
self.indices = np.concatenate((cells['opt'], tmp), 1)
r = cells['r']
a = cells['a']
self.prob = self.own_pdf(r, dist, prec)
        # Probability must sum to one
        # However, there is a small chance of a spurious measurement, also numerically
# we run into trouble if any single cell would get p=1.0
if (norm):
total = np.sum(self.prob)
if (total > 0.0):
self.prob = 0.99*self.prob / total
else:
self.prob = self.prob + 0.01
self.var = r_coeff*r + a_coeff*abs(a) + cov_offset
self.var[r > (dist+prec/2.0)] = 1000
|
gpl-3.0
| 7,736,525,555,743,298,000
| 33.107692
| 95
| 0.489851
| false
| 3.519048
| false
| false
| false
|
momijiame/diagram-autobuild
|
diagram_autobuild/tool.py
|
1
|
2493
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import abc
import os
from future.utils import with_metaclass
class BuildCommand(with_metaclass(abc.ABCMeta)):
def __init__(self, src_file, dst_dir, opts=None):
self.src_file = src_file
self.dst_dir = dst_dir
self.opts = opts or ''
@abc.abstractproperty
def destination(self):
pass
@abc.abstractmethod
def __str__(self):
pass
class GraphvizBuild(BuildCommand):
@property
def destination(self):
return os.path.join(self.dst_dir, 'out.png')
def __str__(self):
command = 'dot {opts} -T png -o {destination} {src_file}'.format(
destination=self.destination,
src_file=self.src_file,
opts=self.opts,
)
return command
class ERAlchemyBuild(BuildCommand):
@property
def destination(self):
return os.path.join(self.dst_dir, 'out.png')
def __str__(self):
command = 'eralchemy {opts} -i {src_file} -o {destination}'.format(
destination=self.destination,
src_file=self.src_file,
opts=self.opts,
)
return command
class BlockdiagSeriesBuild(BuildCommand):
@abc.abstractproperty
def command(self):
pass
@property
def destination(self):
return os.path.join(self.dst_dir, 'out.png')
def __str__(self):
command = '{command} {opts} -o {destination} {src_file}'.format(
command=self.command,
destination=self.destination,
src_file=self.src_file,
opts=self.opts,
)
return command
class BlockdiagBuild(BlockdiagSeriesBuild):
@property
def command(self):
return 'blockdiag'
class NwdiagBuild(BlockdiagSeriesBuild):
@property
def command(self):
return 'nwdiag'
class SeqdiagBuild(BlockdiagSeriesBuild):
@property
def command(self):
return 'seqdiag'
class ActdiagBuild(BlockdiagSeriesBuild):
@property
def command(self):
return 'actdiag'
_MAPPINGS = {
'graphviz': GraphvizBuild,
'blockdiag': BlockdiagBuild,
'nwdiag': NwdiagBuild,
'seqdiag': SeqdiagBuild,
'actdiag': ActdiagBuild,
'eralchemy': ERAlchemyBuild,
}
def get_tools():
return _MAPPINGS.keys()
def get_command(tool_name, src_file, dst_dir, opts=None):
class_ = _MAPPINGS.get(tool_name)
instance = class_(src_file, dst_dir, opts)
return instance
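# Example (illustrative file names):
#   cmd = get_command('graphviz', 'diagram.dot', '/tmp/out')
#   str(cmd)         # -> 'dot  -T png -o /tmp/out/out.png diagram.dot'
#   cmd.destination  # -> '/tmp/out/out.png'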
|
apache-2.0
| -557,459,658,530,651,140
| 19.603306
| 75
| 0.610911
| false
| 3.671576
| false
| false
| false
|
kg-bot/SupyBot
|
plugins/Izmeri/plugin.py
|
1
|
2100
|
###
# Copyright (c) 2013, KG-Bot
# All rights reserved.
#
#
###
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import random
import time
import re
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Izmeri')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
class Izmeri(callbacks.Plugin):
"""Add the help for "@plugin help Izmeri" here
This should describe *how* to use this plugin."""
pass
_penis_responses = ('ima penis od 6cm.', 'ima penis od 15cm.', 'ima penis od 24cm.', 'ima penis od 9cm.', 'ima penis od 18cm.',
'ima penis od 22cm.', 'ima penis od 14cm.', 'ima penis od 17cm.', 'ima penis od 4cm.', 'ima penis od 12cm.', 'ima penis od 13cm.', 'ima enormno veliki penis i da se sa njim nije zajebavati, deco cuvajte se, stigo kuronja u grad')
_sike_odgovori = ('su ove sise velicine zrna graska.', 'su ove sise velicine decije glave.', 'da su ove sise taman kako treba.', 'da ova osoba uopste nema sisa.', 'mozes jednu u usta drugu pod glavu.', 'nije nasao nista, jad i beda.', 'ova osoba ima rak desne dojke.')
def penis(self, irc, msg, args, text, channel):
"""<nick>
Meri velicinu necijeg penisa.
"""
irc.reply(('KG-Bot vadi svoju strucnu spravu za merenje penisa, skida gace \x02%s\x02, meri i dolazi do zakljucka da ova osoba \x02%s\x02') %
(text, utils.iter.choice(self._penis_responses)))
penis = wrap(penis, ['nickInChannel', 'channel'])
def sike(self, irc, msg, args, name, channel):
"""<nick>
Meri velicinu siki. xD"""
irc.reply("KG-Bot vadi svoju strucnu spravu za merenje sisica, zaviruje \x02%s\x02 u grudjnak i zakljucuje da \x02%s\x02" % (name, utils.iter.choice(self._sike_odgovori)))
sike = wrap(sike, ['nickInChannel', 'channel'])
Class = Izmeri
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
gpl-3.0
| -2,426,150,339,966,610,000
| 37.888889
| 272
| 0.670952
| false
| 2.830189
| false
| false
| false
|
tensorflow/models
|
official/vision/beta/data/process_coco_few_shot_json_files.py
|
1
|
6042
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processes the JSON files for COCO few-shot.
We assume that `workdir` mirrors the contents of
http://dl.yf.io/fs-det/datasets/cocosplit/, which contains the official JSON
files for the few-shot COCO evaluation procedure that Wang et al. (2020)'s
"Frustratingly Simple Few-Shot Object Detection" paper uses.
"""
import collections
import itertools
import json
import logging
import os
from absl import app
from absl import flags
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.INFO)
flags.DEFINE_string('workdir', None, 'Working directory.')
FLAGS = flags.FLAGS
CATEGORIES = ['airplane', 'apple', 'backpack', 'banana', 'baseball bat',
'baseball glove', 'bear', 'bed', 'bench', 'bicycle', 'bird',
'boat', 'book', 'bottle', 'bowl', 'broccoli', 'bus', 'cake',
'car', 'carrot', 'cat', 'cell phone', 'chair', 'clock', 'couch',
'cow', 'cup', 'dining table', 'dog', 'donut', 'elephant',
'fire hydrant', 'fork', 'frisbee', 'giraffe', 'hair drier',
'handbag', 'horse', 'hot dog', 'keyboard', 'kite', 'knife',
'laptop', 'microwave', 'motorcycle', 'mouse', 'orange', 'oven',
'parking meter', 'person', 'pizza', 'potted plant',
'refrigerator', 'remote', 'sandwich', 'scissors', 'sheep',
'sink', 'skateboard', 'skis', 'snowboard', 'spoon', 'sports ball',
'stop sign', 'suitcase', 'surfboard', 'teddy bear',
'tennis racket', 'tie', 'toaster', 'toilet', 'toothbrush',
'traffic light', 'train', 'truck', 'tv', 'umbrella', 'vase',
'wine glass', 'zebra']
SEEDS = list(range(10))
SHOTS = [10, 30]
FILE_SUFFIXES = collections.defaultdict(list)
for _seed, _shots in itertools.product(SEEDS, SHOTS):
for _category in CATEGORIES:
FILE_SUFFIXES[(_seed, _shots)].append(
'{}full_box_{}shot_{}_trainval.json'.format(
# http://dl.yf.io/fs-det/datasets/cocosplit/ is organized like so:
#
# datasplit/
# trainvalno5k.json
# 5k.json
# full_box_{1,2,3,5,10,30}shot_{category}_trainval.json
# seed{1-9}/
# full_box_{1,2,3,5,10,30}shot_{category}_trainval.json
#
# This means that the JSON files for seed0 are located in the root
# directory rather than in a `seed?/` subdirectory, hence the
# conditional expression below.
'' if _seed == 0 else 'seed{}/'.format(_seed),
_shots,
_category))
# Base class IDs, as defined in
# https://github.com/ucbdrive/few-shot-object-detection/blob/master/fsdet/evaluation/coco_evaluation.py#L60-L65
BASE_CLASS_IDS = [8, 10, 11, 13, 14, 15, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 65, 70, 73, 74, 75,
76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def main(unused_argv):
workdir = FLAGS.workdir
# Filter novel class annotations from the training and validation sets.
for name in ('trainvalno5k', '5k'):
file_path = os.path.join(workdir, 'datasplit', '{}.json'.format(name))
with tf.io.gfile.GFile(file_path, 'r') as f:
json_dict = json.load(f)
json_dict['annotations'] = [a for a in json_dict['annotations']
if a['category_id'] in BASE_CLASS_IDS]
output_path = os.path.join(
workdir, 'datasplit', '{}_base.json'.format(name))
with tf.io.gfile.GFile(output_path, 'w') as f:
json.dump(json_dict, f)
for seed, shots in itertools.product(SEEDS, SHOTS):
# Retrieve all examples for a given seed and shots setting.
file_paths = [os.path.join(workdir, suffix)
for suffix in FILE_SUFFIXES[(seed, shots)]]
json_dicts = []
for file_path in file_paths:
with tf.io.gfile.GFile(file_path, 'r') as f:
json_dicts.append(json.load(f))
# Make sure that all JSON files for a given seed and shots setting have the
# same metadata. We count on this to fuse them later on.
metadata_dicts = [{'info': d['info'], 'licenses': d['licenses'],
'categories': d['categories']} for d in json_dicts]
if not all(d == metadata_dicts[0] for d in metadata_dicts[1:]):
raise RuntimeError(
'JSON files for {} shots (seed {}) '.format(shots, seed) +
'have different info, licences, or categories fields')
# Retrieve images across all JSON files.
images = sum((d['images'] for d in json_dicts), [])
# Remove duplicate image entries.
images = list({image['id']: image for image in images}.values())
output_dict = {
'info': json_dicts[0]['info'],
'licenses': json_dicts[0]['licenses'],
'categories': json_dicts[0]['categories'],
'images': images,
'annotations': sum((d['annotations'] for d in json_dicts), [])
}
output_path = os.path.join(workdir,
'{}shot_seed{}.json'.format(shots, seed))
with tf.io.gfile.GFile(output_path, 'w') as f:
json.dump(output_dict, f)
logger.info('Processed %d shots (seed %d) and saved to %s',
shots, seed, output_path)
if __name__ == '__main__':
flags.mark_flag_as_required('workdir')
app.run(main)
|
apache-2.0
| 2,241,717,489,347,854,600
| 40.958333
| 111
| 0.602781
| false
| 3.354803
| false
| false
| false
|
frePPLe/frePPLe
|
freppledb/input/admin.py
|
1
|
29772
|
#
# Copyright (C) 2007-2020 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.utils.translation import gettext_lazy as _
from freppledb.input.models import Resource, Operation, Location, SetupMatrix, SetupRule
from freppledb.input.models import Buffer, Customer, Demand, Item, OperationResource
from freppledb.input.models import OperationMaterial, Skill, ResourceSkill, Supplier
from freppledb.input.models import (
Calendar,
CalendarBucket,
ManufacturingOrder,
SubOperation,
)
from freppledb.input.models import ItemSupplier, ItemDistribution, DistributionOrder
from freppledb.input.models import PurchaseOrder, DeliveryOrder, OperationPlanResource
from freppledb.input.models import OperationPlanMaterial
from freppledb.common.adminforms import MultiDBModelAdmin, MultiDBTabularInline
from freppledb.admin import data_site
class CalendarBucket_inline(MultiDBTabularInline):
model = CalendarBucket
extra = 0
exclude = ("source",)
class CalendarBucket_admin(MultiDBModelAdmin):
model = CalendarBucket
raw_id_fields = ("calendar",)
save_on_top = True
fieldsets = (
(None, {"fields": ("calendar", ("startdate", "enddate"), "value", "priority")}),
(
_("repeating pattern"),
{
"fields": (
("starttime", "endtime"),
(
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
),
)
},
),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_calendarbucket_change",
"permissions": "input.change_calendarbucket",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_calendarbucket_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_calendarbucket_history",
},
]
data_site.register(CalendarBucket, CalendarBucket_admin)
class Calendar_admin(MultiDBModelAdmin):
model = Calendar
save_on_top = True
inlines = [CalendarBucket_inline]
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_calendar_change",
"permissions": "input.change_calendar",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_calendar_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_calendar_history",
},
]
data_site.register(Calendar, Calendar_admin)
class Location_admin(MultiDBModelAdmin):
model = Location
raw_id_fields = ("available", "owner")
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_location_change",
"permissions": "input.change_location",
},
{
"name": "inboundorders",
"label": _("inbound distribution"),
"view": "input_distributionorder_in_by_location",
},
{
"name": "outboundorders",
"label": _("outbound distribution"),
"view": "input_distributionorder_out_by_location",
},
{
"name": "manufacturingorders",
"label": _("manufacturing orders"),
"view": "input_manufacturingorder_by_location",
},
{
"name": "purchaseorders",
"label": _("purchase orders"),
"view": "input_purchaseorder_by_location",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_location_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_location_history",
},
]
data_site.register(Location, Location_admin)
class Customer_admin(MultiDBModelAdmin):
model = Customer
raw_id_fields = ("owner",)
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_customer_change",
"permissions": "input.change_customer",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_customer_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_customer_history",
},
]
data_site.register(Customer, Customer_admin)
class ItemSupplier_inline(MultiDBTabularInline):
model = ItemSupplier
fk_name = "item"
raw_id_fields = ("supplier", "location", "resource")
extra = 0
exclude = ("source",)
class Supplier_admin(MultiDBModelAdmin):
model = Supplier
raw_id_fields = ("available", "owner")
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_supplier_change",
"permissions": "input.change_supplier",
},
{
"name": "purchaseorders",
"label": _("purchase orders"),
"view": "input_purchaseorder_by_supplier",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_supplier_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_supplier_history",
},
]
data_site.register(Supplier, Supplier_admin)
class OperationMaterial_inline(MultiDBTabularInline):
model = OperationMaterial
fields = (
"item",
"operation",
"quantity",
"quantity_fixed",
"type",
"transferbatch",
"offset",
"effective_start",
"effective_end",
)
raw_id_fields = ("operation", "item")
extra = 0
exclude = ("source",)
class OperationResource_inline(MultiDBTabularInline):
model = OperationResource
raw_id_fields = ("operation", "resource", "skill")
fields = (
"resource",
"operation",
"quantity",
"quantity_fixed",
"effective_start",
"effective_end",
"skill",
"setup",
"search",
)
extra = 0
exclude = ("source",)
class Item_admin(MultiDBModelAdmin):
model = Item
save_on_top = True
raw_id_fields = ("owner",)
inlines = [ItemSupplier_inline, OperationMaterial_inline]
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_item_change",
"permissions": "input.change_item",
},
{"name": "supplypath", "label": _("supply path"), "view": "supplypath_item"},
{"name": "whereused", "label": _("where used"), "view": "whereused_item"},
{"name": "plan", "label": _("plan"), "view": "output_demand_plandetail"},
{
"name": "inventory",
"label": _("inventory"),
"view": "output_buffer_plandetail_by_item",
},
{
"name": "inventorydetail",
"label": _("inventory detail"),
"view": "input_operationplanmaterial_plandetail_by_item",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_item_comment",
},
{"name": "history", "label": _("History"), "view": "admin:input_item_history"},
]
data_site.register(Item, Item_admin)
class ItemSupplier_admin(MultiDBModelAdmin):
model = ItemSupplier
save_on_top = True
raw_id_fields = ("item", "supplier", "resource")
exclude = ("source", "id")
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_itemsupplier_change",
"permissions": "input.change_itemsupplier",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_itemsupplier_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_itemsupplier_history",
},
]
data_site.register(ItemSupplier, ItemSupplier_admin)
class ItemDistribution_admin(MultiDBModelAdmin):
model = ItemDistribution
save_on_top = True
raw_id_fields = ("item", "resource")
exclude = ("source", "id")
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_itemdistribution_change",
"permissions": "input.change_itemdistribution",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_itemdistribution_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_itemdistribution_history",
},
]
data_site.register(ItemDistribution, ItemDistribution_admin)
class ChildOperation_inline(MultiDBTabularInline):
model = Operation
fk_name = "owner"
extra = 1
# raw_id_fields = ("owner",)
fields = (
"priority",
"name",
"effective_start",
"effective_end",
"location",
"type",
"duration",
"duration_per",
)
exclude = ("source",)
class SubOperation_inline(MultiDBTabularInline):
model = SubOperation
verbose_name = _("child operation")
verbose_name_plural = _("child suboperations")
fk_name = "operation"
extra = 1
raw_id_fields = ("suboperation",)
exclude = ("source",)
class ResourceSkill_inline(MultiDBTabularInline):
model = ResourceSkill
fk_name = "resource"
raw_id_fields = ("skill",)
extra = 1
exclude = ("source",)
class Operation_admin(MultiDBModelAdmin):
model = Operation
raw_id_fields = ("location", "item", "available", "owner")
save_on_top = True
inlines = [
OperationMaterial_inline,
OperationResource_inline,
ChildOperation_inline,
SubOperation_inline,
]
fieldsets = (
(
None,
{
"fields": (
"name",
"type",
"item",
"location",
"description",
"category",
"subcategory",
)
},
),
(
_("planning parameters"),
{
"fields": (
"fence",
"posttime",
"sizeminimum",
"sizemultiple",
"sizemaximum",
"cost",
"duration",
"duration_per",
"available",
)
},
),
(
_("alternate selection"),
{
"fields": (
"search",
"priority",
"effective_start",
"effective_end",
"owner",
)
},
),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operation_change",
"permissions": "input.change_operation",
},
{
"name": "supplypath",
"label": _("supply path"),
"view": "supplypath_operation",
},
{"name": "whereused", "label": _("where used"), "view": "whereused_operation"},
{"name": "plan", "label": _("plan"), "view": "output_operation_plandetail"},
{
"name": "plandetail",
"label": _("manufacturing orders"),
"view": "input_manufacturingorder_by_operation",
},
{
"name": "constraint",
"label": _("constrained demand"),
"view": "output_constraint_operation",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_operation_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_operation_history",
},
]
data_site.register(Operation, Operation_admin)
class SubOperation_admin(MultiDBModelAdmin):
model = SubOperation
raw_id_fields = ("operation", "suboperation")
save_on_top = True
exclude = ("source", "id")
data_site.register(SubOperation, SubOperation_admin)
class Buffer_admin(MultiDBModelAdmin):
raw_id_fields = ("location", "item", "minimum_calendar")
fieldsets = (
(
None,
{
"fields": (
"item",
"location",
"batch",
"description",
"category",
"subcategory",
)
},
),
(_("inventory"), {"fields": ("onhand",)}),
(
_("planning parameters"),
{"fields": ("type", "minimum", "minimum_calendar", "min_interval")},
),
)
save_on_top = True
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_buffer_change",
"permissions": "input.change_buffer",
},
{"name": "supplypath", "label": _("supply path"), "view": "supplypath_buffer"},
{"name": "whereused", "label": _("where used"), "view": "whereused_buffer"},
{"name": "plan", "label": _("plan"), "view": "output_buffer_plandetail"},
{
"name": "plandetail",
"label": _("plan detail"),
"view": "input_operationplanmaterial_plandetail_by_buffer",
},
{
"name": "constraint",
"label": _("constrained demand"),
"view": "output_constraint_buffer",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_buffer_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_buffer_history",
},
]
data_site.register(Buffer, Buffer_admin)
class SetupRule_inline(MultiDBTabularInline):
model = SetupRule
extra = 3
exclude = ("source",)
class SetupRule_admin(MultiDBModelAdmin):
model = SetupRule
raw_id_fields = ("setupmatrix",)
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_setuprule_change",
"permissions": "input.change_setuprule",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_setuprule_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_setuprule_history",
},
]
data_site.register(SetupRule, SetupRule_admin)
class SetupMatrix_admin(MultiDBModelAdmin):
model = SetupMatrix
save_on_top = True
inlines = [SetupRule_inline]
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_setupmatrix_change",
"permissions": "input.change_setupmatrix",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_setupmatrix_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_setupmatrix_history",
},
]
data_site.register(SetupMatrix, SetupMatrix_admin)
class Skill_admin(MultiDBModelAdmin):
model = Skill
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_skill_change",
"permissions": "input.change_skill",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_skill_comment",
},
{"name": "history", "label": _("History"), "view": "admin:input_skill_history"},
]
data_site.register(Skill, Skill_admin)
class ResourceSkill_admin(MultiDBModelAdmin):
model = ResourceSkill
raw_id_fields = ("resource", "skill")
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_resourceskill_change",
"permissions": "input.change_resoureskill",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_resourceskill_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_resourceskill_history",
},
]
data_site.register(ResourceSkill, ResourceSkill_admin)
class Resource_admin(MultiDBModelAdmin):
model = Resource
raw_id_fields = (
"maximum_calendar",
"location",
"setupmatrix",
"owner",
"available",
"efficiency_calendar",
)
save_on_top = True
inlines = [OperationResource_inline, ResourceSkill_inline]
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_resource_change",
"permissions": "input.change_resource",
},
{
"name": "supplypath",
"label": _("supply path"),
"view": "supplypath_resource",
},
{"name": "whereused", "label": _("where used"), "view": "whereused_resource"},
{"name": "plan", "label": _("plan"), "view": "output_resource_plandetail"},
{
"name": "plandetail",
"label": _("plan detail"),
"view": "input_operationplanresource_plandetail",
},
{
"name": "constraint",
"label": _("constrained demand"),
"view": "output_constraint_resource",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_resource_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_resource_history",
},
]
data_site.register(Resource, Resource_admin)
class OperationMaterial_admin(MultiDBModelAdmin):
model = OperationMaterial
raw_id_fields = ("operation", "item")
save_on_top = True
exclude = ("id",)
fieldsets = (
(
None,
{
"fields": (
"item",
"operation",
"type",
"quantity",
"quantity_fixed",
"transferbatch",
"offset",
("effective_start", "effective_end"),
)
},
),
(_("alternates"), {"fields": ("name", "priority", "search")}),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operationmaterial_change",
"permissions": "input.change_operationmaterial",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_operationmaterial_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_operationmaterial_history",
},
]
data_site.register(OperationMaterial, OperationMaterial_admin)
class OperationResource_admin(MultiDBModelAdmin):
model = OperationResource
raw_id_fields = ("operation", "resource", "skill")
save_on_top = True
exclude = ("id",)
fieldsets = (
(
None,
{
"fields": (
"resource",
"operation",
"quantity",
"quantity_fixed",
"skill",
"setup",
("effective_start", "effective_end"),
)
},
),
(_("alternates"), {"fields": ("name", "priority", "search")}),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operationresource_change",
"permissions": "input.change_operationresource",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_operationresource_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_operationresource_history",
},
]
data_site.register(OperationResource, OperationResource_admin)
class ManufacturingOrder_admin(MultiDBModelAdmin):
model = ManufacturingOrder
raw_id_fields = ("operation", "owner")
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"reference",
"operation",
"quantity",
"startdate",
"enddate",
"owner",
"status",
)
},
),
)
exclude = (
"type",
"source",
"criticality",
"delay",
"origin",
"destination",
"item",
"supplier",
"location",
"demand",
"name",
"due",
"color",
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_manufacturingorder_change",
"permissions": "input.change_manufacturingorder",
}
]
data_site.register(ManufacturingOrder, ManufacturingOrder_admin)
class DistributionOrder_admin(MultiDBModelAdmin):
model = DistributionOrder
raw_id_fields = ("item",)
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"reference",
"item",
"origin",
"destination",
"quantity",
"shipping_date",
"receipt_date",
"status",
"batch",
)
},
),
)
exclude = (
"type",
"source",
"criticality",
"delay",
"operation",
"owner",
"color",
"supplier",
"location",
"demand",
"name",
"due",
"startdate",
"enddate",
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_distributionorder_change",
"permissions": "input.change_distributionorder",
}
]
data_site.register(DistributionOrder, DistributionOrder_admin)
class PurchaseOrder_admin(MultiDBModelAdmin):
model = PurchaseOrder
raw_id_fields = ("item", "supplier")
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"reference",
"item",
"location",
"supplier",
"quantity",
"ordering_date",
"receipt_date",
"status",
"batch",
)
},
),
)
exclude = (
"type",
"source",
"criticality",
"delay",
"operation",
"owner",
"color",
"origin",
"destination",
"demand",
"name",
"due",
"startdate",
"enddate",
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_purchaseorder_change",
"permissions": "input.change_purchaseorder",
}
]
data_site.register(PurchaseOrder, PurchaseOrder_admin)
class DeliveryOrder_admin(MultiDBModelAdmin):
model = DeliveryOrder
raw_id_fields = ("item", "demand")
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"reference",
"demand",
"item",
"location",
"quantity",
"status",
"batch",
)
},
),
)
exclude = (
"type",
"source",
"criticality",
"delay",
"operation",
"owner",
"color",
"origin",
"destination",
"name",
"supplier",
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_deliveryorder_change",
"permissions": "input.change_deliveryorder",
}
]
data_site.register(DeliveryOrder, DeliveryOrder_admin)
class Demand_admin(MultiDBModelAdmin):
model = Demand
raw_id_fields = ("customer", "item", "operation", "owner")
fieldsets = (
(
None,
{
"fields": (
"name",
"item",
"location",
"customer",
"due",
"quantity",
"batch",
"priority",
"status",
"description",
"category",
"subcategory",
"owner",
)
},
),
(
_("planning parameters"),
{"fields": ("operation", "minshipment", "maxlateness")},
),
)
save_on_top = True
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_demand_change",
"permissions": "input.change_demand",
},
{"name": "supplypath", "label": _("supply path"), "view": "supplypath_demand"},
{
"name": "constraint",
"label": _("why short or late?"),
"view": "output_constraint_demand",
},
{"name": "plan", "label": _("plan"), "view": "output_demand_pegging"},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_demand_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_demand_history",
},
]
data_site.register(Demand, Demand_admin)
class OperationPlanResource_admin(MultiDBModelAdmin):
model = OperationPlanResource
raw_id_fields = (
"operationplan",
) # TODO a foreign key to OperationPlan doesn't work because it's an abstract class without admin
save_on_top = True
fieldsets = (
(None, {"fields": ("operationplan", "resource", "status")}),
(_("computed fields"), {"fields": ("quantity", "startdate", "enddate")}),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operationplanresource_change",
"permissions": "input.change_operationplanresource",
}
]
data_site.register(OperationPlanResource, OperationPlanResource_admin)
class OperationPlanMaterial_admin(MultiDBModelAdmin):
model = OperationPlanMaterial
raw_id_fields = (
"operationplan",
"item",
) # TODO a foreign key to OperationPlan doesn't work because it's an abstract class without admin
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"operationplan",
"item",
"location",
"status",
"quantity",
"flowdate",
)
},
),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operationplanmaterial_change",
"permissions": "input.change_operationplanmaterial",
}
]
data_site.register(OperationPlanMaterial, OperationPlanMaterial_admin)
|
agpl-3.0
| 1,851,207,254,629,476,900
| 25.300353
| 102
| 0.465505
| false
| 4.366036
| false
| false
| false
|
mabuchilab/Instrumental
|
instrumental/drivers/util.py
|
1
|
14998
|
# -*- coding: utf-8 -*-
# Copyright 2015-2019 Nate Bogdanowicz
"""
Helpful utilities for writing drivers.
"""
import copy
import contextlib
from inspect import getargspec
import pint
from past.builtins import basestring
from . import decorator
from .. import Q_, u
from ..log import get_logger
log = get_logger(__name__)
__all__ = ['check_units', 'unit_mag', 'check_enums', 'as_enum', 'visa_timeout_context']
def to_quantity(value):
"""Convert to a pint.Quantity
This function handles offset units in strings slightly better than Q_ does. It uses caching to
avoid reparsing strings.
"""
try:
quantity = copy.copy(to_quantity.cache[value])
except (KeyError, TypeError): # key is missing or unhashable
quantity = _to_quantity(value)
if isinstance(value, basestring):
to_quantity.cache[value] = copy.copy(quantity) # Guard against mutation
return quantity
to_quantity.cache = {}
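# Illustrative usage (not part of the original module): `to_quantity` accepts
# anything `Q_` accepts, plus "<magnitude> <units>" strings, which lets it cope
# with offset units such as degC that `Q_('25 degC')` may reject. The values
# below are made up for demonstration.
#
#     to_quantity('5 mV')     # -> a Quantity of 5 millivolts
#     to_quantity('25 degC')  # parsed via the split-and-convert fallback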
def _to_quantity(value):
"""Convert to a pint.Quantity
This function handles offset units in strings slightly better than Q_ does.
"""
try:
return Q_(value)
except Exception as e:
log.info(e)
try:
mag_str, units = value.split()
try:
mag = int(mag_str)
except ValueError:
mag = float(mag_str)
return Q_(mag, units)
except Exception as e:
raise ValueError('Could not construct Quantity from {}'.format(value))
def as_enum(enum_type, arg):
"""Check if arg is an instance or key of enum_type, and return that enum"""
if isinstance(arg, enum_type):
return arg
try:
return enum_type[arg]
except KeyError:
raise ValueError("{} is not a valid {} enum".format(arg, enum_type.__name__))
def check_units(*pos, **named):
"""Decorator to enforce the dimensionality of input args and return values.
Allows strings and anything that can be passed as a single arg to `pint.Quantity`.
::
@check_units(value='V')
def set_voltage(value):
pass # `value` will be a pint.Quantity with Volt-like units
"""
def inout_map(arg, unit_info, name=None):
if unit_info is None:
return arg
use_units_msg = (" Make sure you're passing in a unitful value, either as a string or by "
"using `instrumental.u` or `instrumental.Q_()`")
optional, units = unit_info
if optional and arg is None:
return None
elif arg == 0:
# Allow naked zeroes as long as we're using absolute units (e.g. not degF)
# It's a bit dicey using this private method; works in 0.6 at least
if units._ok_for_muldiv():
return Q_(arg, units)
else:
if name is not None:
extra_msg = " for argument '{}'.".format(name) + use_units_msg
raise pint.DimensionalityError(u.dimensionless.units, units.units,
extra_msg=extra_msg)
else:
extra_msg = " for return value." + use_units_msg
raise pint.DimensionalityError(u.dimensionless.units, units.units,
extra_msg=extra_msg)
else:
q = to_quantity(arg)
if q.dimensionality != units.dimensionality:
extra_info = '' if isinstance(arg, Q_) else use_units_msg
if name is not None:
extra_msg = " for argument '{}'.".format(name) + extra_info
raise pint.DimensionalityError(q.units, units.units, extra_msg=extra_msg)
else:
extra_msg = " for return value." + extra_info
raise pint.DimensionalityError(q.units, units.units, extra_msg=extra_msg)
return q
return _unit_decorator(inout_map, inout_map, pos, named)
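# Illustrative sketch (not in the original docstring): besides per-argument
# specs, the decorator also accepts a 'ret' spec that dimension-checks the
# return value; the function and units below are made up.
#
#     @check_units(duration='ms', ret='Hz')
#     def rep_rate(duration):
#         return 1 / duration
#
#     rep_rate('0.1 s')  # argument and return value are both dimension-checked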
def unit_mag(*pos, **named):
"""Decorator to extract the magnitudes of input args and return values.
Allows strings and anything that can be passed as a single arg to `pint.Quantity`.
::
@unit_mag(value='V')
def set_voltage(value):
pass # The input must be in Volt-like units and `value` will be a raw number
# expressing the magnitude in Volts
"""
def in_map(arg, unit_info, name):
if unit_info is None:
return arg
optional, units = unit_info
if optional and arg is None:
return None
elif arg == 0:
# Allow naked zeroes as long as we're using absolute units (e.g. not degF)
# It's a bit dicey using this private method; works in 0.6 at least
if units._ok_for_muldiv():
return arg
else:
if name is not None:
raise pint.DimensionalityError(u.dimensionless.units, units.units,
extra_msg=" for argument '{}'".format(name))
else:
raise pint.DimensionalityError(u.dimensionless.units, units.units,
extra_msg=" for return value")
else:
q = to_quantity(arg)
try:
if q.units == units:
return q.magnitude # Speed up the common case
else:
return q.to(units).magnitude
except pint.DimensionalityError:
raise pint.DimensionalityError(q.units, units.units,
extra_msg=" for argument '{}'".format(name))
def out_map(res, unit_info):
if unit_info is None:
return res
optional, units = unit_info
if optional and res is None:
return None
else:
q = to_quantity(res)
try:
                return q.to(units).magnitude
except pint.DimensionalityError:
raise pint.DimensionalityError(q.units, units.units, extra_msg=" for return value")
return _unit_decorator(in_map, out_map, pos, named)
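# Illustrative sketch (not in the original docstring): `unit_mag` converts the
# input to the declared units and hands the wrapped function a bare magnitude;
# the function below is hypothetical.
#
#     @unit_mag(delay='ms')
#     def set_delay(delay):
#         return delay      # receives 2000.0 when called with '2 s'
#
#     set_delay('2 s')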
def check_enums(**kw_args):
"""Decorator to type-check input arguments as enums.
Allows strings and anything that can be passed to `~instrumental.drivers.util.as_enum`.
::
@check_enums(mode=SampleMode)
def set_mode(mode):
pass # `mode` will be of type SampleMode
"""
def checker_factory(enum_type, arg_name):
def checker(arg):
return as_enum(enum_type, arg)
return checker
return arg_decorator(checker_factory, (), kw_args)
def arg_decorator(checker_factory, dec_pos_args, dec_kw_args):
"""Produces a decorator that checks the arguments to the function in wraps.
Parameters
----------
checker_factory : function
Takes the args (decorator_arg_val, arg_name) and produces a 'checker' function, which takes
and returns a single value. When acting simply as a checker, it takes the arg, checks that
it is valid (using the ``decorator_arg_val`` and/or ``arg_name``), raises an Exception if
it is not, and returns the value unchanged if it is. Additionally, the checker may return a
different value, e.g. a ``str`` which has been converted to a ``Quantity`` as in
``check_units()``.
dec_pos_args : tuple
The positional args (i.e. *args) passed to the decorator constructor
dec_kw_args : dict
The keyword args (i.e. **kwargs) passed to the decorator constructor
"""
def wrap(func):
"""Function that actually wraps the function to be decorated"""
arg_names, vargs, kwds, default_vals = getargspec(func)
default_vals = default_vals or ()
pos_arg_names = {i: name for i, name in enumerate(arg_names)}
# Put everything in one dict
for dec_arg_val, arg_name in zip(dec_pos_args, arg_names):
if arg_name in dec_kw_args:
raise TypeError("Argument specified twice, by both position and name")
dec_kw_args[arg_name] = dec_arg_val
checkers = {}
new_defaults = {}
num_nondefs = len(arg_names) - len(default_vals)
for default_val, arg_name in zip(default_vals, arg_names[num_nondefs:]):
if arg_name in dec_kw_args:
checker = checker_factory(dec_kw_args[arg_name], arg_name)
checkers[arg_name] = checker
new_defaults[arg_name] = checker(default_val)
for arg_name in arg_names[:num_nondefs]:
if arg_name in dec_kw_args:
checkers[arg_name] = checker_factory(dec_kw_args[arg_name], arg_name)
def wrapper(func, *args, **kwds):
checked = new_defaults.copy()
checked.update({name: (checkers[name](arg) if name in checkers else arg) for name, arg
in kwds.items()})
for i, arg in enumerate(args):
name = pos_arg_names[i]
checked[name] = checkers[name](arg) if name in checkers else arg
result = func(**checked)
return result
return decorator.decorate(func, wrapper)
return wrap
def _unit_decorator(in_map, out_map, pos_args, named_args):
def wrap(func):
ret = named_args.pop('ret', None)
if ret is None:
ret_units = None
elif isinstance(ret, tuple):
ret_units = []
for arg in ret:
if arg is None:
unit = None
elif isinstance(arg, basestring):
optional = arg.startswith('?')
if optional:
arg = arg[1:]
unit = (optional, to_quantity(arg))
ret_units.append(unit)
ret_units = tuple(ret_units)
else:
            optional = ret.startswith('?')
            if optional:
                ret = ret[1:]
            ret_units = (optional, to_quantity(ret))
arg_names, vargs, kwds, defaults = getargspec(func)
pos_units = []
for arg in pos_args:
if arg is None:
unit = None
elif isinstance(arg, basestring):
optional = arg.startswith('?')
if optional:
arg = arg[1:]
unit = (optional, to_quantity(arg))
else:
raise TypeError("Each arg spec must be a string or None")
pos_units.append(unit)
named_units = {}
for name, arg in named_args.items():
if arg is None:
unit = None
elif isinstance(arg, basestring):
optional = arg.startswith('?')
if optional:
arg = arg[1:]
unit = (optional, to_quantity(arg))
else:
raise TypeError("Each arg spec must be a string or None")
named_units[name] = unit
# Add positional units to named units
for i, units in enumerate(pos_units):
name = arg_names[i]
if name in named_units:
raise Exception("Units of {} specified by position and by name".format(name))
named_units[name] = units
# Pad out the rest of the positional units with None
pos_units.extend([None] * (len(arg_names) - len(pos_args)))
# Add named units to positional units
for name, units in named_units.items():
try:
i = arg_names.index(name)
pos_units[i] = units
except ValueError:
pass
defaults = tuple() if defaults is None else defaults
# Convert the defaults
new_defaults = {}
ndefs = len(defaults)
for d, unit, n in zip(defaults, pos_units[-ndefs:], arg_names[-ndefs:]):
new_defaults[n] = d if unit is None else in_map(d, unit, n)
def wrapper(func, *args, **kwargs):
# Convert the input arguments
new_args = [in_map(a, u, n) for a, u, n in zip(args, pos_units, arg_names)]
new_kwargs = {n: in_map(a, named_units.get(n, None), n) for n, a in kwargs.items()}
# Fill in converted defaults
for name in arg_names[max(len(args), len(arg_names)-len(defaults)):]:
if name not in new_kwargs:
new_kwargs[name] = new_defaults[name]
result = func(*new_args, **new_kwargs)
# Allow for unit checking of multiple return values
if isinstance(ret_units, tuple):
return tuple(map(out_map, result, ret_units))
else:
return out_map(result, ret_units)
return decorator.decorate(func, wrapper)
return wrap
@contextlib.contextmanager
def visa_timeout_context(resource, timeout):
"""Context manager for temporarily setting a visa resource's timeout.
::
with visa_timeout_context(rsrc, 100):
... # `rsrc` will have a timeout of 100 ms within this block
"""
old_timeout = resource.timeout
resource.timeout = timeout
yield
resource.timeout = old_timeout
_ALLOWED_VISA_ATTRS = ['timeout', 'read_termination', 'write_termination', 'end_input', 'parity',
'baud_rate']
@contextlib.contextmanager
def visa_context(resource, **settings):
"""Context manager for temporarily setting a visa resource's settings
The settings will be set at the beginning, then reset to their previous values at the end of the
context. Only the settings mentioned below are supported, and they must be specified as keyword
arguments.
If the resource does not have a given setting, it will be ignored.
Parameters
----------
resource : VISA resource
The resource to temporarily modify
timeout :
read_termination :
write_termination :
end_input :
parity :
baud_rate :
"""
old_values = {}
attr_names = list(key for key in settings.keys() if hasattr(resource, key))
for attr_name in attr_names:
if attr_name not in _ALLOWED_VISA_ATTRS:
raise AttributeError("VISA attribute '{}' is not supported by this context manager")
for attr_name in attr_names:
old_values[attr_name] = getattr(resource, attr_name)
setattr(resource, attr_name, settings[attr_name])
yield
for attr_name in reversed(attr_names):
setattr(resource, attr_name, old_values[attr_name])
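# Illustrative usage (not part of the original module); `rsrc` stands in for a
# hypothetical pyvisa resource object:
#
#     with visa_context(rsrc, timeout=500, read_termination='\n'):
#         rsrc.query('*IDN?')  # runs with the temporary settings
#     # the previous timeout and read_termination are restored on exit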
|
gpl-3.0
| -6,475,790,318,701,356,000
| 34.940887
| 100
| 0.55274
| false
| 4.27903
| false
| false
| false
|
ingadhoc/odoo-infrastructure
|
infrastructure/wizard/rename_db_wizard.py
|
1
|
1333
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, api, models
class infrastructure_rename_db_name(models.TransientModel):
_name = "infrastructure.rename_database.name"
_description = "Infrastructure Rename Database Name Wizard"
name = fields.Char(
'New Database Name',
size=64,
required=True
)
# database_type_id = fields.Many2one(
# 'infrastructure.database_type',
# string='Database Type',
# required=True,
# )
    # TODO remove as we no longer use db prefix
# @api.onchange('database_type_id')
# def onchange_database_type_id(self):
# if self.database_type_id:
# self.name = self.database_type_id.prefix + '_'
# TODO send suggested backup data
@api.multi
def action_confirm(self):
active_id = self._context.get('active_id')
if not active_id:
return False
active_record = self.env['infrastructure.database'].browse(active_id)
active_record.rename_db(self.name)
# active_record.database_type_id = self.database_type_id
|
agpl-3.0
| 8,406,193,038,402,414,000
| 34.078947
| 78
| 0.562641
| false
| 4.114198
| false
| false
| false
|
halftk/OpenShareNow
|
OpenShareNow/OpenShareNow/settings.py
|
1
|
2380
|
"""
Django settings for OpenShareNow project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import os.path
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'etlrhar2t*+sbit%hoibvxftrvy%#6%)&9#x6@p()94cqr%i-v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__),'../osnow/templates'),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'osnow',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'OpenShareNow.urls'
WSGI_APPLICATION = 'OpenShareNow.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'basededades.db',
'USER': '',
'PASSWORD': '',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
gpl-2.0
| -430,860,475,850,630,200
| 24.319149
| 85
| 0.714286
| false
| 3.207547
| false
| false
| false
|
hgijeon/the_PLAY
|
Structure/View/SelectScreenView.py
|
1
|
5849
|
from .View import *
import os
from ..Scene.GameScene import GameScene
from tkinter.filedialog import askopenfilename
class SelectScreenView(View):
def onInit(self):
self.red = gameapi.Color(255,0,0)
self.fileSelected = False
self.background = self.resizeImage(gameapi.image.load(os.path.join("Image","easy_background.jpg")), (800,340))
self.icon_pressed = self.resizeImage(gameapi.image.load(os.path.join("Image","icon_pressed.png")), (150,150))
self.icon_unpressed = self.resizeImage(gameapi.image.load(os.path.join("Image","icon_unpressed.png")), (150,150))
self.easy = self.resizeImage(gameapi.image.load(os.path.join("Image","easy.png")), (300,150))
self.normal = self.resizeImage(gameapi.image.load(os.path.join("Image","normal.png")), (300,150))
self.hard = self.resizeImage(gameapi.image.load(os.path.join("Image","hard.png")), (300,150))
self.title1 = self.resizeImage(gameapi.image.load(os.path.join("Image","title1.png")), (300,170))
self.title2 = self.resizeImage(gameapi.image.load(os.path.join("Image","title2.png")), (300,200))
self.title3 = self.resizeImage(gameapi.image.load(os.path.join("Image","title3.png")), (300,200))
self.title4 = self.resizeImage(gameapi.image.load(os.path.join("Image","title3.png")), (300,200))
self.title5 = self.resizeImage(gameapi.image.load(os.path.join("Image","title1.png")), (300,170))
self.title6 = self.resizeImage(gameapi.image.load(os.path.join("Image","title2.png")), (300,200))
self.title7 = self.resizeImage(gameapi.image.load(os.path.join("Image","title7.png")), (300,200))
self.title8 = self.resizeImage(gameapi.image.load(os.path.join("Image","title2.png")), (300,200))
self.title9 = self.resizeImage(gameapi.image.load(os.path.join("Image","title3.png")), (300,200))
self.mode = 1
self.icon = 1
def onDraw(self):
self.fill((200,200,200))
#self.drawRect(self.red, (0, 0, 800, 600))
leftTop = (0,0)
self.drawImage (self.background, leftTop)
if self.mode == 1:
leftTop = (0,0)
self.drawImage (self.easy, leftTop)
elif self.mode == 2:
leftTop = (250,0)
self.drawImage (self.normal, leftTop)
elif self.mode == 3:
leftTop = (500,0)
self.drawImage (self.hard, leftTop)
self.drawIcons()
def onUpdateTime(self, time):
if self.fileSelected:
self.scene.sceneManager.pushGameScene(self.scene.filename)
if self.keyMiddle.check(self.keyMiddle.key['5']):
self.mode = 1
elif self.keyMiddle.check(self.keyMiddle.key['6']):
self.mode = 2
elif self.keyMiddle.check(self.keyMiddle.key['7']):
self.mode = 3
elif self.keyMiddle.check(self.keyMiddle.key['r']):
self.icon = 1
elif self.keyMiddle.check(self.keyMiddle.key['t']):
self.icon = 2
elif self.keyMiddle.check(self.keyMiddle.key['y']):
self.icon = 3
elif self.keyMiddle.check(81):
self.fileopen()
def fileopen(self):
print ('Fileopen')
print (self.icon)
if self.mode == 1:
if self.icon == 1:
self.scene.filename = "MIDI/Music/Twinkle Twinkle Little Star.mid"
elif self.icon == 2:
self.scene.filename = "MIDI/Music/Happy Birthday.mid"
elif self.icon == 3:
self.scene.filename = "MIDI/Music/Amazing Grace.mid"
if self.mode == 2:
if self.icon == 1:
self.scene.filename = "MIDI/Music/moonlight-movement1.mid"
elif self.icon == 2:
self.scene.filename = "MIDI/Music/wagner-bridal-chorus.mid"
elif self.icon == 3:
self.scene.filename = "MIDI/Music/pachelbels-canon-arranged.mid"
if self.mode == 3:
if self.icon == 1:
self.scene.filename = "MIDI/Music/Minuet.mid"
elif self.icon == 2:
self.scene.filename = "MIDI/Music/idina_menzel-let_it_go.mid"
elif self.icon == 3:
self.scene.filename = "MIDI/Music/The-Entertainer.mid"
self.fileSelected = True
def drawIcons(self):
leftTop = (80,50)
self.drawImage (self.icon_unpressed, leftTop)
leftTop = (330,50)
self.drawImage (self.icon_unpressed, leftTop)
leftTop = (580,50)
self.drawImage (self.icon_unpressed, leftTop)
if self.icon == 1:
leftTop = (80, 50)
self.drawImage (self.icon_pressed, leftTop)
elif self.icon == 2:
leftTop = (330,50)
self.drawImage (self.icon_pressed, leftTop)
elif self.icon == 3:
leftTop = (580,50)
self.drawImage (self.icon_pressed, leftTop)
if self.mode == 1:
leftTop = (0,200)
self.drawImage (self.title1, leftTop)
leftTop = (250,200)
self.drawImage (self.title2, leftTop)
leftTop = (500,200)
self.drawImage (self.title3, leftTop)
if self.mode == 2:
leftTop = (0,200)
self.drawImage (self.title4, leftTop)
leftTop = (250,200)
self.drawImage (self.title5, leftTop)
leftTop = (500,200)
self.drawImage (self.title6, leftTop)
if self.mode == 3:
leftTop = (0,200)
self.drawImage (self.title7, leftTop)
leftTop = (250,200)
self.drawImage (self.title8, leftTop)
leftTop = (500,200)
self.drawImage (self.title9, leftTop)
|
mit
| 6,012,414,013,244,064,000
| 42.007353
| 121
| 0.570525
| false
| 3.392691
| false
| false
| false
|
chaudum/crate-top
|
cstat/widgets.py
|
1
|
8226
|
# vi: set encoding=utf-8
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
import urwid
from datetime import datetime
from .utils import byte_size
from .log import get_logger
logger = get_logger(__name__)
class BarWidgetBase(urwid.Text):
START = '['
END = ']'
SINGLE = '='
DOUBLE = '#'
WATERMARK_LOW = 0.80
WATERMARK_HIGH = 0.95
def __init__(self, label, symbol):
self.label = '{0:<10}'.format(label[:9])
self.symbol = symbol
super().__init__(self.label)
def rows(self, size, focus=False):
return 1
class HorizontalBar(BarWidgetBase):
def __init__(self, label, current=0.0, total=100.0, symbol=BarWidgetBase.DOUBLE):
super().__init__(label, symbol)
self.set_progress(current, total)
def set_progress(self, current=0.0, total=100.0):
self.progress = total > 0 and current / total or 0.0
self.current = current
self.total = total
self._invalidate()
def color(self):
if self.progress < self.WATERMARK_LOW:
return 'text_green'
elif self.progress < self.WATERMARK_HIGH:
return 'text_yellow'
return 'text_red'
def render(self, size, focus=False):
(maxcol, ) = size
label_len = len(self.label)
steps = maxcol - 2 - label_len
chars = round(float(steps) * self.progress)
bar = self.symbol * chars
text = self.progress_text()
base = bar + ' ' * (steps - chars)
base = base[:len(base)-len(text)] + text
line_attr = [('default', label_len + 1)]
if chars:
line_attr += [(self.color(), chars)]
line_attr += [('default', 1 + steps - chars)]
line = self.label + self.START + base + self.END
return urwid.TextCanvas([line.encode('utf-8'), ],
attr=[line_attr],
maxcol=maxcol)
class HorizontalPercentBar(HorizontalBar):
def progress_text(self):
return '{:.1%}'.format(self.progress)
class HorizontalBytesBar(HorizontalBar):
def progress_text(self):
return '{}/{}'.format(byte_size(self.current), byte_size(self.total))
class MultiBarWidget(urwid.Pile):
def __init__(self, title, bar_cls=HorizontalPercentBar, **bar_options):
self.title = title
self.bar_cls = bar_cls
self.bar = bar_cls('', **bar_options)
self.details = urwid.Pile([])
widgets = [
self.bar,
self.details,
]
self._history = []
super().__init__(widgets)
def toggle_details(self):
if len(self.details.contents):
self.details.contents = []
else:
self.append_node_bars()
def append_node_bars(self):
bars = []
for value in self._history:
bar = self.bar_cls(value[2], value[0], value[1],
symbol=HorizontalBar.SINGLE)
bars.append((bar, ('pack', None)))
self.details.contents = bars
return len(bars)
def sum(self, values=[]):
logger.debug('%s', [sum([x[0] for x in values]), sum([x[1] for x in values])])
return (sum([x[0] for x in values]), sum([x[1] for x in values]))
def set_data(self, values=[]):
self._history = values
self.bar.set_progress(*self.sum(values))
if len(self.details.contents) and \
self.append_node_bars():
for idx, widget in enumerate(self.details.contents):
bar = widget[0]
bar.set_progress(*values[idx][:2])
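# Illustrative sketch (not part of the original file): each entry passed to
# `set_data()` is a (current, total, label) tuple, e.g. per-node heap usage.
# The widget title and values below are made up.
#
#     heap = MultiBarWidget('HEAP', bar_cls=HorizontalBytesBar)
#     heap.set_data([
#         (512 * 1024 ** 2, 2 * 1024 ** 3, 'node-1'),
#         (768 * 1024 ** 2, 2 * 1024 ** 3, 'node-2'),
#     ])
#     heap.toggle_details()  # expand to one bar per node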
class IOBar(BarWidgetBase):
"""
Tx ... sent/written/outbound
Rx ... received/read/inbound
"""
def __init__(self, label, suffix='p/s'):
super().__init__(label, 'x')
self.tpl = '{0}: {1:>11}'
self.suffix = suffix
self.set_progress(0.0, 0.0)
def set_progress(self, tx=0.0, rx=0.0):
self.tx = tx
self.rx = rx
self._invalidate()
def render(self, size, focus=False):
"""
LABEL [ Tx: 0.0 b/s Rx: 0.0b/s ]
+----------+-+-+----+----------+...-+----+----------+-+-+
10 1 1 4 11 1 4 11 1 1
+--------------+---------------+...-+---------------+---+
12 15 1 15 2
+-------------------------------...---------------------+
43
"""
(maxcol, ) = size
label_len = len(self.label) # sanity check. should always be 10
var = maxcol - 45
if var < 1:
raise AssertionError('IOBar requires a minimum width of 45 columns!')
text = ' '
text += self.tpl.format('Tx', byte_size(self.tx, suffix=self.suffix, k=1000))
text += ' ' * var
text += self.tpl.format('Rx', byte_size(self.rx, suffix=self.suffix, k=1000))
text += ' '
line_attr = [
('default', 12),
('tx', 15),
('default', var),
('rx', 15),
('default', 2),
]
line = self.label + self.START + text + self.END
return urwid.TextCanvas([line.encode('utf-8'), ],
attr=[line_attr],
maxcol=maxcol)
class IOStatWidget(MultiBarWidget):
def __init__(self, title, suffix):
super().__init__(title, bar_cls=IOBar, suffix=suffix)
self.suffix = suffix
def append_node_bars(self):
bars = []
for ts, packets, name in self._history:
bar = self.bar_cls(name, suffix=self.suffix)
bars.append((bar, ('pack', None)))
self.details.contents = bars
return len(bars)
def sum(self, values=[]):
tx_total = 0.0
rx_total = 0.0
if len(self._history):
for idx, value in enumerate(values):
if self._history[idx][0] < values[idx][0]:
tx, rx = self._calculate(values[idx], self._history[idx])
tx_total += tx
rx_total += rx
return tx_total, rx_total
def set_data(self, values=[]):
"""
:param values: a list of [timestamp, {'tx': ..., 'rx': ...}, node_name]
"""
if len(self._history) and \
len(self.details.contents) and \
self.append_node_bars():
for idx, widget in enumerate(self.details.contents):
bar = widget[0]
if self._history[idx][0] >= values[idx][0]:
tx, rx = bar.tx, bar.rx
else:
tx, rx = self._calculate(values[idx], self._history[idx])
bar.set_progress(tx, rx)
self.bar.set_progress(*self.sum(values))
self._history = values
def _calculate(self, value, last_value):
prev_timestamp, prev_values, prev_name = last_value
timestamp, values, name = value
assert prev_name == name
diff = (timestamp - prev_timestamp).total_seconds()
tx = (values['tx'] - prev_values['tx']) / diff
rx = (values['rx'] - prev_values['rx']) / diff
return tx, rx
|
apache-2.0
| -6,437,177,651,761,129,000
| 33.275
| 86
| 0.532093
| false
| 3.894886
| false
| false
| false
|
gennaios/alfred-gnosis
|
src/playhouse/sqlite_ext.py
|
1
|
43137
|
import json
import math
import re
import struct
import sys
from peewee import *
from peewee import ColumnBase
from peewee import EnclosedNodeList
from peewee import Entity
from peewee import Expression
from peewee import Node
from peewee import NodeList
from peewee import OP
from peewee import VirtualField
from peewee import merge_dict
from peewee import sqlite3
try:
from playhouse._sqlite_ext import (
backup,
backup_to_file,
Blob,
ConnectionHelper,
register_bloomfilter,
register_hash_functions,
register_rank_functions,
sqlite_get_db_status,
sqlite_get_status,
TableFunction,
ZeroBlob,
)
CYTHON_SQLITE_EXTENSIONS = True
except ImportError:
CYTHON_SQLITE_EXTENSIONS = False
if sys.version_info[0] == 3:
basestring = str
FTS3_MATCHINFO = 'pcx'
FTS4_MATCHINFO = 'pcnalx'
if sqlite3 is not None:
FTS_VERSION = 4 if sqlite3.sqlite_version_info[:3] >= (3, 7, 4) else 3
else:
FTS_VERSION = 3
FTS5_MIN_SQLITE_VERSION = (3, 9, 0)
class RowIDField(AutoField):
auto_increment = True
column_name = name = required_name = 'rowid'
def bind(self, model, name, *args):
if name != self.required_name:
raise ValueError('%s must be named "%s".' %
(type(self), self.required_name))
super(RowIDField, self).bind(model, name, *args)
class DocIDField(RowIDField):
column_name = name = required_name = 'docid'
class AutoIncrementField(AutoField):
def ddl(self, ctx):
node_list = super(AutoIncrementField, self).ddl(ctx)
return NodeList((node_list, SQL('AUTOINCREMENT')))
class JSONPath(ColumnBase):
def __init__(self, field, path=None):
super(JSONPath, self).__init__()
self._field = field
self._path = path or ()
@property
def path(self):
return Value('$%s' % ''.join(self._path))
def __getitem__(self, idx):
if isinstance(idx, int):
item = '[%s]' % idx
else:
item = '.%s' % idx
return JSONPath(self._field, self._path + (item,))
def set(self, value, as_json=None):
if as_json or isinstance(value, (list, dict)):
value = fn.json(self._field._json_dumps(value))
return fn.json_set(self._field, self.path, value)
def update(self, value):
return self.set(fn.json_patch(self, self._field._json_dumps(value)))
def remove(self):
return fn.json_remove(self._field, self.path)
def json_type(self):
return fn.json_type(self._field, self.path)
def length(self):
return fn.json_array_length(self._field, self.path)
def children(self):
return fn.json_each(self._field, self.path)
def tree(self):
return fn.json_tree(self._field, self.path)
def __sql__(self, ctx):
return ctx.sql(fn.json_extract(self._field, self.path)
if self._path else self._field)
class JSONField(TextField):
field_type = 'JSON'
def __init__(self, json_dumps=None, json_loads=None, **kwargs):
self._json_dumps = json_dumps or json.dumps
self._json_loads = json_loads or json.loads
super(JSONField, self).__init__(**kwargs)
def python_value(self, value):
if value is not None:
try:
return self._json_loads(value)
except (TypeError, ValueError):
return value
def db_value(self, value):
if value is not None:
if not isinstance(value, Node):
value = fn.json(self._json_dumps(value))
return value
def _e(op):
def inner(self, rhs):
if isinstance(rhs, (list, dict)):
rhs = Value(rhs, converter=self.db_value, unpack=False)
return Expression(self, op, rhs)
return inner
__eq__ = _e(OP.EQ)
__ne__ = _e(OP.NE)
__gt__ = _e(OP.GT)
__ge__ = _e(OP.GTE)
__lt__ = _e(OP.LT)
__le__ = _e(OP.LTE)
__hash__ = Field.__hash__
def __getitem__(self, item):
return JSONPath(self)[item]
def set(self, value, as_json=None):
return JSONPath(self).set(value, as_json)
def update(self, data):
return JSONPath(self).update(data)
def remove(self):
return JSONPath(self).remove()
def json_type(self):
return fn.json_type(self)
def length(self):
return fn.json_array_length(self)
def children(self):
"""
Schema of `json_each` and `json_tree`:
key,
value,
type TEXT (object, array, string, etc),
atom (value for primitive/scalar types, NULL for array and object)
id INTEGER (unique identifier for element)
parent INTEGER (unique identifier of parent element or NULL)
fullkey TEXT (full path describing element)
path TEXT (path to the container of the current element)
json JSON hidden (1st input parameter to function)
root TEXT hidden (2nd input parameter, path at which to start)
"""
return fn.json_each(self)
def tree(self):
return fn.json_tree(self)
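# Illustrative sketch (not from the upstream docs); the model and database
# names are hypothetical:
#
#     db = SqliteExtDatabase(':memory:')
#
#     class KV(Model):
#         data = JSONField()
#         class Meta:
#             database = db
#
#     KV.create(data={'settings': {'volume': 3}})
#     # Read a nested value with JSONPath-style indexing:
#     KV.select(KV.data['settings']['volume'].alias('volume'))
#     # Update a nested value in place:
#     KV.update(data=KV.data['settings']['volume'].set(5)).execute()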
class SearchField(Field):
def __init__(self, unindexed=False, column_name=None, **k):
if k:
raise ValueError('SearchField does not accept these keyword '
'arguments: %s.' % sorted(k))
super(SearchField, self).__init__(unindexed=unindexed,
column_name=column_name, null=True)
def match(self, term):
return match(self, term)
class VirtualTableSchemaManager(SchemaManager):
def _create_virtual_table(self, safe=True, **options):
options = self.model.clean_options(
merge_dict(self.model._meta.options, options))
# Structure:
# CREATE VIRTUAL TABLE <model>
# USING <extension_module>
# ([prefix_arguments, ...] fields, ... [arguments, ...], [options...])
ctx = self._create_context()
ctx.literal('CREATE VIRTUAL TABLE ')
if safe:
ctx.literal('IF NOT EXISTS ')
(ctx
.sql(self.model)
.literal(' USING '))
ext_module = self.model._meta.extension_module
if isinstance(ext_module, Node):
return ctx.sql(ext_module)
ctx.sql(SQL(ext_module)).literal(' ')
arguments = []
meta = self.model._meta
if meta.prefix_arguments:
arguments.extend([SQL(a) for a in meta.prefix_arguments])
# Constraints, data-types, foreign and primary keys are all omitted.
for field in meta.sorted_fields:
if isinstance(field, (RowIDField)) or field._hidden:
continue
field_def = [Entity(field.column_name)]
if field.unindexed:
field_def.append(SQL('UNINDEXED'))
arguments.append(NodeList(field_def))
if meta.arguments:
arguments.extend([SQL(a) for a in meta.arguments])
if options:
arguments.extend(self._create_table_option_sql(options))
return ctx.sql(EnclosedNodeList(arguments))
def _create_table(self, safe=True, **options):
if issubclass(self.model, VirtualModel):
return self._create_virtual_table(safe, **options)
return super(VirtualTableSchemaManager, self)._create_table(
safe, **options)
class VirtualModel(Model):
class Meta:
arguments = None
extension_module = None
prefix_arguments = None
primary_key = False
schema_manager_class = VirtualTableSchemaManager
@classmethod
def clean_options(cls, options):
return options
class BaseFTSModel(VirtualModel):
@classmethod
def clean_options(cls, options):
content = options.get('content')
prefix = options.get('prefix')
tokenize = options.get('tokenize')
if isinstance(content, basestring) and content == '':
# Special-case content-less full-text search tables.
options['content'] = "''"
elif isinstance(content, Field):
# Special-case to ensure fields are fully-qualified.
options['content'] = Entity(content.model._meta.table_name,
content.column_name)
if prefix:
if isinstance(prefix, (list, tuple)):
prefix = ','.join([str(i) for i in prefix])
options['prefix'] = "'%s'" % prefix.strip("' ")
if tokenize and cls._meta.extension_module.lower() == 'fts5':
# Tokenizers need to be in quoted string for FTS5, but not for FTS3
# or FTS4.
options['tokenize'] = '"%s"' % tokenize
return options
class FTSModel(BaseFTSModel):
"""
VirtualModel class for creating tables that use either the FTS3 or FTS4
search extensions. Peewee automatically determines which version of the
FTS extension is supported and will use FTS4 if possible.
"""
# FTS3/4 uses "docid" in the same way a normal table uses "rowid".
docid = DocIDField()
class Meta:
extension_module = 'FTS%s' % FTS_VERSION
@classmethod
def _fts_cmd(cls, cmd):
tbl = cls._meta.table_name
res = cls._meta.database.execute_sql(
"INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
return res.fetchone()
@classmethod
def optimize(cls):
return cls._fts_cmd('optimize')
@classmethod
def rebuild(cls):
return cls._fts_cmd('rebuild')
@classmethod
def integrity_check(cls):
return cls._fts_cmd('integrity-check')
@classmethod
def merge(cls, blocks=200, segments=8):
return cls._fts_cmd('merge=%s,%s' % (blocks, segments))
@classmethod
def automerge(cls, state=True):
return cls._fts_cmd('automerge=%s' % (state and '1' or '0'))
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *weights):
matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
return fn.fts_rank(matchinfo, *weights)
@classmethod
def bm25(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25(match_info, *weights)
@classmethod
def bm25f(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25f(match_info, *weights)
@classmethod
def lucene(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_lucene(match_info, *weights)
@classmethod
def _search(cls, term, weights, with_score, score_alias, score_fn,
explicit_ordering):
if not weights:
rank = score_fn()
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
# Attempt to get the specified weight of the field by looking
                # it up using its field instance followed by name.
field_weight = weights.get(field, weights.get(field.name, 1.0))
weight_args.append(field_weight)
rank = score_fn(*weight_args)
else:
rank = score_fn(*weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(term))
.order_by(order_by))
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering)
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25,
explicit_ordering)
@classmethod
def search_bm25f(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25f,
explicit_ordering)
@classmethod
def search_lucene(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.lucene,
explicit_ordering)
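# Illustrative sketch (not from the upstream docs); the index model below is
# hypothetical and `db` is assumed to be a SqliteExtDatabase (which registers
# the rank functions by default):
#
#     class NoteIndex(FTSModel):
#         content = SearchField()
#         class Meta:
#             database = db
#
#     NoteIndex.create_table()
#     NoteIndex.insert({NoteIndex.content: 'full-text search with peewee'}).execute()
#     NoteIndex.search('peewee', with_score=True)  # rows ordered by rank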
_alphabet = 'abcdefghijklmnopqrstuvwxyz'
_alphanum = (set('\t ,"(){}*:_+0123456789') |
set(_alphabet) |
set(_alphabet.upper()) |
set((chr(26),)))
_invalid_ascii = set(chr(p) for p in range(128) if chr(p) not in _alphanum)
_quote_re = re.compile(r'(?:[^\s"]|"(?:\\.|[^"])*")+')
class FTS5Model(BaseFTSModel):
"""
Requires SQLite >= 3.9.0.
Table options:
content: table name of external content, or empty string for "contentless"
content_rowid: column name of external content primary key
prefix: integer(s). Ex: '2' or '2 3 4'
tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
The unicode tokenizer supports the following parameters:
* remove_diacritics (1 or 0, default is 1)
    * tokenchars (string of characters, e.g. '-_')
* separators (string of characters)
Parameters are passed as alternating parameter name and value, so:
{'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
Content-less tables:
    If you don't need the full-text content in its original form, you can
    specify a content-less table. Searches and auxiliary functions will work
    as usual, but the only value that can be returned when SELECTing is the
    rowid. Content-less tables also do not support UPDATE or DELETE.
External content tables:
You can set up triggers to sync these, e.g.
-- Create a table. And an external content fts5 table to index it.
CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
-- Triggers to keep the FTS index up to date.
CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
END;
CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
Built-in auxiliary functions:
* bm25(tbl[, weight_0, ... weight_n])
* highlight(tbl, col_idx, prefix, suffix)
* snippet(tbl, col_idx, prefix, suffix, ?, max_tokens)
"""
# FTS5 does not support declared primary keys, but we can use the
# implicit rowid.
rowid = RowIDField()
class Meta:
extension_module = 'fts5'
_error_messages = {
'field_type': ('Besides the implicit `rowid` column, all columns must '
'be instances of SearchField'),
'index': 'Secondary indexes are not supported for FTS5 models',
'pk': 'FTS5 models must use the default `rowid` primary key',
}
@classmethod
def validate_model(cls):
# Perform FTS5-specific validation and options post-processing.
if cls._meta.primary_key.name != 'rowid':
raise ImproperlyConfigured(cls._error_messages['pk'])
for field in cls._meta.fields.values():
if not isinstance(field, (SearchField, RowIDField)):
raise ImproperlyConfigured(cls._error_messages['field_type'])
if cls._meta.indexes:
raise ImproperlyConfigured(cls._error_messages['index'])
@classmethod
def fts5_installed(cls):
if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
return False
# Test in-memory DB to determine if the FTS5 extension is installed.
tmp_db = sqlite3.connect(':memory:')
try:
tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
except:
try:
tmp_db.enable_load_extension(True)
tmp_db.load_extension('fts5')
except:
return False
else:
cls._meta.database.load_extension('fts5')
finally:
tmp_db.close()
return True
@staticmethod
def validate_query(query):
"""
Simple helper function to indicate whether a search query is a
valid FTS5 query. Note: this simply looks at the characters being
used, and is not guaranteed to catch all problematic queries.
"""
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
continue
if set(token) & _invalid_ascii:
return False
return True
@staticmethod
def clean_query(query, replace=chr(26)):
"""
Clean a query of invalid tokens.
"""
accum = []
any_invalid = False
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
accum.append(token)
continue
token_set = set(token)
invalid_for_token = token_set & _invalid_ascii
if invalid_for_token:
any_invalid = True
for c in invalid_for_token:
token = token.replace(c, replace)
accum.append(token)
if any_invalid:
return ' '.join(accum)
return query
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *args):
return cls.bm25(*args) if args else SQL('rank')
@classmethod
def bm25(cls, *weights):
return fn.bm25(cls._meta.entity, *weights)
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls.search_bm25(
FTS5Model.clean_query(term),
weights,
with_score,
score_alias,
explicit_ordering)
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search using selected `term`."""
if not weights:
rank = SQL('rank')
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
if isinstance(field, SearchField) and not field.unindexed:
weight_args.append(
weights.get(field, weights.get(field.name, 1.0)))
rank = fn.bm25(cls._meta.entity, *weight_args)
else:
rank = fn.bm25(cls._meta.entity, *weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(FTS5Model.clean_query(term)))
.order_by(order_by))
@classmethod
def _fts_cmd_sql(cls, cmd, **extra_params):
tbl = cls._meta.entity
columns = [tbl]
values = [cmd]
for key, value in extra_params.items():
columns.append(Entity(key))
values.append(value)
return NodeList((
SQL('INSERT INTO'),
cls._meta.entity,
EnclosedNodeList(columns),
SQL('VALUES'),
EnclosedNodeList(values)))
@classmethod
def _fts_cmd(cls, cmd, **extra_params):
query = cls._fts_cmd_sql(cmd, **extra_params)
return cls._meta.database.execute(query)
@classmethod
def automerge(cls, level):
if not (0 <= level <= 16):
raise ValueError('level must be between 0 and 16')
return cls._fts_cmd('automerge', rank=level)
@classmethod
def merge(cls, npages):
return cls._fts_cmd('merge', rank=npages)
@classmethod
def set_pgsz(cls, pgsz):
return cls._fts_cmd('pgsz', rank=pgsz)
@classmethod
def set_rank(cls, rank_expression):
return cls._fts_cmd('rank', rank=rank_expression)
@classmethod
def delete_all(cls):
return cls._fts_cmd('delete-all')
@classmethod
def VocabModel(cls, table_type='row', table=None):
if table_type not in ('row', 'col', 'instance'):
raise ValueError('table_type must be either "row", "col" or '
'"instance".')
attr = '_vocab_model_%s' % table_type
if not hasattr(cls, attr):
class Meta:
database = cls._meta.database
table_name = table or cls._meta.table_name + '_v'
extension_module = fn.fts5vocab(
cls._meta.entity,
SQL(table_type))
attrs = {
'term': VirtualField(TextField),
'doc': IntegerField(),
'cnt': IntegerField(),
'rowid': RowIDField(),
'Meta': Meta,
}
if table_type == 'col':
attrs['col'] = VirtualField(TextField)
elif table_type == 'instance':
attrs['offset'] = VirtualField(IntegerField)
class_name = '%sVocab' % cls.__name__
setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
return getattr(cls, attr)
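# Illustrative sketch (not from the upstream docs); the model below is
# hypothetical and assumes SQLite was built with FTS5 support:
#
#     class DocumentIndex(FTS5Model):
#         title = SearchField()
#         body = SearchField()
#         class Meta:
#             database = db
#
#     DocumentIndex.create_table()
#     # Weight title matches twice as heavily as body matches:
#     DocumentIndex.search_bm25('sqlite', weights={'title': 2.0, 'body': 1.0},
#                               with_score=True)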
def ClosureTable(model_class, foreign_key=None, referencing_class=None,
referencing_key=None):
"""Model factory for the transitive closure extension."""
if referencing_class is None:
referencing_class = model_class
if foreign_key is None:
for field_obj in model_class._meta.refs:
if field_obj.rel_model is model_class:
foreign_key = field_obj
break
else:
raise ValueError('Unable to find self-referential foreign key.')
source_key = model_class._meta.primary_key
if referencing_key is None:
referencing_key = source_key
class BaseClosureTable(VirtualModel):
depth = VirtualField(IntegerField)
id = VirtualField(IntegerField)
idcolumn = VirtualField(TextField)
parentcolumn = VirtualField(TextField)
root = VirtualField(IntegerField)
tablename = VirtualField(TextField)
class Meta:
extension_module = 'transitive_closure'
@classmethod
def descendants(cls, node, depth=None, include_node=False):
query = (model_class
.select(model_class, cls.depth.alias('depth'))
.join(cls, on=(source_key == cls.id))
.where(cls.root == node)
.objects())
if depth is not None:
query = query.where(cls.depth == depth)
elif not include_node:
query = query.where(cls.depth > 0)
return query
@classmethod
def ancestors(cls, node, depth=None, include_node=False):
query = (model_class
.select(model_class, cls.depth.alias('depth'))
.join(cls, on=(source_key == cls.root))
.where(cls.id == node)
.objects())
if depth:
query = query.where(cls.depth == depth)
elif not include_node:
query = query.where(cls.depth > 0)
return query
@classmethod
def siblings(cls, node, include_node=False):
if referencing_class is model_class:
# self-join
fk_value = node.__data__.get(foreign_key.name)
query = model_class.select().where(foreign_key == fk_value)
else:
# siblings as given in reference_class
siblings = (referencing_class
.select(referencing_key)
.join(cls, on=(foreign_key == cls.root))
.where((cls.id == node) & (cls.depth == 1)))
# the according models
query = (model_class
.select()
.where(source_key << siblings)
.objects())
if not include_node:
query = query.where(source_key != node)
return query
class Meta:
database = referencing_class._meta.database
options = {
'tablename': referencing_class._meta.table_name,
'idcolumn': referencing_key.column_name,
'parentcolumn': foreign_key.column_name}
primary_key = False
name = '%sClosure' % model_class.__name__
return type(name, (BaseClosureTable,), {'Meta': Meta})
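# Illustrative sketch (not from the upstream docs): requires the SQLite
# transitive_closure extension to be loadable; the models below are
# hypothetical.
#
#     class Category(Model):
#         name = TextField()
#         parent = ForeignKeyField('self', backref='children', null=True)
#         class Meta:
#             database = db
#
#     CategoryClosure = ClosureTable(Category)
#     db.load_extension('transitive_closure')
#     CategoryClosure.create_table()
#     # All descendants of `root`, at any depth:
#     CategoryClosure.descendants(root, include_node=False)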
class LSMTable(VirtualModel):
class Meta:
extension_module = 'lsm1'
filename = None
@classmethod
def clean_options(cls, options):
filename = cls._meta.filename
if not filename:
raise ValueError('LSM1 extension requires that you specify a '
'filename for the LSM database.')
else:
if len(filename) >= 2 and filename[0] != '"':
filename = '"%s"' % filename
if not cls._meta.primary_key:
raise ValueError('LSM1 models must specify a primary-key field.')
key = cls._meta.primary_key
if isinstance(key, AutoField):
raise ValueError('LSM1 models must explicitly declare a primary '
'key field.')
if not isinstance(key, (TextField, BlobField, IntegerField)):
raise ValueError('LSM1 key must be a TextField, BlobField, or '
'IntegerField.')
key._hidden = True
if isinstance(key, IntegerField):
data_type = 'UINT'
elif isinstance(key, BlobField):
data_type = 'BLOB'
else:
data_type = 'TEXT'
cls._meta.prefix_arguments = [filename, '"%s"' % key.name, data_type]
# Does the key map to a scalar value, or a tuple of values?
if len(cls._meta.sorted_fields) == 2:
cls._meta._value_field = cls._meta.sorted_fields[1]
else:
cls._meta._value_field = None
return options
@classmethod
def load_extension(cls, path='lsm.so'):
cls._meta.database.load_extension(path)
@staticmethod
def slice_to_expr(key, idx):
if idx.start is not None and idx.stop is not None:
return key.between(idx.start, idx.stop)
elif idx.start is not None:
return key >= idx.start
elif idx.stop is not None:
return key <= idx.stop
@staticmethod
def _apply_lookup_to_query(query, key, lookup):
if isinstance(lookup, slice):
expr = LSMTable.slice_to_expr(key, lookup)
if expr is not None:
query = query.where(expr)
return query, False
elif isinstance(lookup, Expression):
return query.where(lookup), False
else:
return query.where(key == lookup), True
@classmethod
def get_by_id(cls, pk):
query, is_single = cls._apply_lookup_to_query(
cls.select().namedtuples(),
cls._meta.primary_key,
pk)
if is_single:
try:
row = query.get()
except cls.DoesNotExist:
raise KeyError(pk)
return row[1] if cls._meta._value_field is not None else row
else:
return query
@classmethod
def set_by_id(cls, key, value):
if cls._meta._value_field is not None:
data = {cls._meta._value_field: value}
elif isinstance(value, tuple):
data = {}
for field, fval in zip(cls._meta.sorted_fields[1:], value):
data[field] = fval
elif isinstance(value, dict):
data = value
elif isinstance(value, cls):
data = value.__dict__
data[cls._meta.primary_key] = key
cls.replace(data).execute()
@classmethod
def delete_by_id(cls, pk):
query, is_single = cls._apply_lookup_to_query(
cls.delete(),
cls._meta.primary_key,
pk)
return query.execute()
OP.MATCH = 'MATCH'
def _sqlite_regexp(regex, value):
return re.search(regex, value) is not None
class SqliteExtDatabase(SqliteDatabase):
def __init__(self, database, c_extensions=None, rank_functions=True,
hash_functions=False, regexp_function=False,
bloomfilter=False, json_contains=False, *args, **kwargs):
super(SqliteExtDatabase, self).__init__(database, *args, **kwargs)
self._row_factory = None
if c_extensions and not CYTHON_SQLITE_EXTENSIONS:
raise ImproperlyConfigured('SqliteExtDatabase initialized with '
'C extensions, but shared library was '
'not found!')
prefer_c = CYTHON_SQLITE_EXTENSIONS and (c_extensions is not False)
if rank_functions:
if prefer_c:
register_rank_functions(self)
else:
self.register_function(bm25, 'fts_bm25')
self.register_function(rank, 'fts_rank')
self.register_function(bm25, 'fts_bm25f') # Fall back to bm25.
self.register_function(bm25, 'fts_lucene')
if hash_functions:
if not prefer_c:
raise ValueError('C extension required to register hash '
'functions.')
register_hash_functions(self)
if regexp_function:
self.register_function(_sqlite_regexp, 'regexp', 2)
if bloomfilter:
if not prefer_c:
raise ValueError('C extension required to use bloomfilter.')
register_bloomfilter(self)
if json_contains:
self.register_function(_json_contains, 'json_contains')
self._c_extensions = prefer_c
def _add_conn_hooks(self, conn):
super(SqliteExtDatabase, self)._add_conn_hooks(conn)
if self._row_factory:
conn.row_factory = self._row_factory
def row_factory(self, fn):
self._row_factory = fn
if CYTHON_SQLITE_EXTENSIONS:
SQLITE_STATUS_MEMORY_USED = 0
SQLITE_STATUS_PAGECACHE_USED = 1
SQLITE_STATUS_PAGECACHE_OVERFLOW = 2
SQLITE_STATUS_SCRATCH_USED = 3
SQLITE_STATUS_SCRATCH_OVERFLOW = 4
SQLITE_STATUS_MALLOC_SIZE = 5
SQLITE_STATUS_PARSER_STACK = 6
SQLITE_STATUS_PAGECACHE_SIZE = 7
SQLITE_STATUS_SCRATCH_SIZE = 8
SQLITE_STATUS_MALLOC_COUNT = 9
SQLITE_DBSTATUS_LOOKASIDE_USED = 0
SQLITE_DBSTATUS_CACHE_USED = 1
SQLITE_DBSTATUS_SCHEMA_USED = 2
SQLITE_DBSTATUS_STMT_USED = 3
SQLITE_DBSTATUS_LOOKASIDE_HIT = 4
SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5
SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6
SQLITE_DBSTATUS_CACHE_HIT = 7
SQLITE_DBSTATUS_CACHE_MISS = 8
SQLITE_DBSTATUS_CACHE_WRITE = 9
SQLITE_DBSTATUS_DEFERRED_FKS = 10
#SQLITE_DBSTATUS_CACHE_USED_SHARED = 11
def __status__(flag, return_highwater=False):
"""
Expose a sqlite3_status() call for a particular flag as a property of
the Database object.
"""
def getter(self):
result = sqlite_get_status(flag)
return result[1] if return_highwater else result
return property(getter)
def __dbstatus__(flag, return_highwater=False, return_current=False):
"""
Expose a sqlite3_dbstatus() call for a particular flag as a property of
the Database instance. Unlike sqlite3_status(), the dbstatus properties
pertain to the current connection.
"""
def getter(self):
if self._state.conn is None:
raise ImproperlyConfigured('database connection not opened.')
result = sqlite_get_db_status(self._state.conn, flag)
if return_current:
return result[0]
return result[1] if return_highwater else result
return property(getter)
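    # A hedged illustration (assumed usage, not from the original source): the
    # properties built from __status__/__dbstatus__ below surface the sqlite3
    # status counters, e.g.:
    #
    #   db = CSqliteExtDatabase(':memory:')
    #   db.connect()
    #   db.memory_used   # (current, highwater) from sqlite3_status()
    #   db.cache_hit     # current value from sqlite3_dbstatus() on this conn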
class CSqliteExtDatabase(SqliteExtDatabase):
def __init__(self, *args, **kwargs):
self._conn_helper = None
self._commit_hook = self._rollback_hook = self._update_hook = None
self._replace_busy_handler = False
super(CSqliteExtDatabase, self).__init__(*args, **kwargs)
def init(self, database, replace_busy_handler=False, **kwargs):
super(CSqliteExtDatabase, self).init(database, **kwargs)
self._replace_busy_handler = replace_busy_handler
def _close(self, conn):
if self._commit_hook:
self._conn_helper.set_commit_hook(None)
if self._rollback_hook:
self._conn_helper.set_rollback_hook(None)
if self._update_hook:
self._conn_helper.set_update_hook(None)
return super(CSqliteExtDatabase, self)._close(conn)
def _add_conn_hooks(self, conn):
super(CSqliteExtDatabase, self)._add_conn_hooks(conn)
self._conn_helper = ConnectionHelper(conn)
if self._commit_hook is not None:
self._conn_helper.set_commit_hook(self._commit_hook)
if self._rollback_hook is not None:
self._conn_helper.set_rollback_hook(self._rollback_hook)
if self._update_hook is not None:
self._conn_helper.set_update_hook(self._update_hook)
if self._replace_busy_handler:
timeout = self._timeout or 5
self._conn_helper.set_busy_handler(timeout * 1000)
def on_commit(self, fn):
self._commit_hook = fn
if not self.is_closed():
self._conn_helper.set_commit_hook(fn)
return fn
def on_rollback(self, fn):
self._rollback_hook = fn
if not self.is_closed():
self._conn_helper.set_rollback_hook(fn)
return fn
def on_update(self, fn):
self._update_hook = fn
if not self.is_closed():
self._conn_helper.set_update_hook(fn)
return fn
def changes(self):
return self._conn_helper.changes()
@property
def last_insert_rowid(self):
return self._conn_helper.last_insert_rowid()
@property
def autocommit(self):
return self._conn_helper.autocommit()
def backup(self, destination, pages=None, name=None, progress=None):
return backup(self.connection(), destination.connection(),
pages=pages, name=name, progress=progress)
def backup_to_file(self, filename, pages=None, name=None,
progress=None):
return backup_to_file(self.connection(), filename, pages=pages,
name=name, progress=progress)
def blob_open(self, table, column, rowid, read_only=False):
return Blob(self, table, column, rowid, read_only)
# Status properties.
memory_used = __status__(SQLITE_STATUS_MEMORY_USED)
malloc_size = __status__(SQLITE_STATUS_MALLOC_SIZE, True)
malloc_count = __status__(SQLITE_STATUS_MALLOC_COUNT)
pagecache_used = __status__(SQLITE_STATUS_PAGECACHE_USED)
pagecache_overflow = __status__(SQLITE_STATUS_PAGECACHE_OVERFLOW)
pagecache_size = __status__(SQLITE_STATUS_PAGECACHE_SIZE, True)
scratch_used = __status__(SQLITE_STATUS_SCRATCH_USED)
scratch_overflow = __status__(SQLITE_STATUS_SCRATCH_OVERFLOW)
scratch_size = __status__(SQLITE_STATUS_SCRATCH_SIZE, True)
# Connection status properties.
lookaside_used = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_USED)
lookaside_hit = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_HIT, True)
lookaside_miss = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
True)
lookaside_miss_full = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
True)
cache_used = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED, False, True)
#cache_used_shared = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED_SHARED,
# False, True)
schema_used = __dbstatus__(SQLITE_DBSTATUS_SCHEMA_USED, False, True)
statement_used = __dbstatus__(SQLITE_DBSTATUS_STMT_USED, False, True)
cache_hit = __dbstatus__(SQLITE_DBSTATUS_CACHE_HIT, False, True)
cache_miss = __dbstatus__(SQLITE_DBSTATUS_CACHE_MISS, False, True)
cache_write = __dbstatus__(SQLITE_DBSTATUS_CACHE_WRITE, False, True)
def match(lhs, rhs):
return Expression(lhs, OP.MATCH, rhs)
def _parse_match_info(buf):
# See http://sqlite.org/fts3.html#matchinfo
bufsize = len(buf) # Length in bytes.
return [struct.unpack('@I', buf[i:i+4])[0] for i in range(0, bufsize, 4)]
def get_weights(ncol, raw_weights):
if not raw_weights:
return [1] * ncol
else:
weights = [0] * ncol
for i, weight in enumerate(raw_weights):
weights[i] = weight
return weights
# Ranking implementation, which parses matchinfo.
def rank(raw_match_info, *raw_weights):
# Handle match_info called w/default args 'pcx' - based on the example rank
# function http://sqlite.org/fts3.html#appendix_a
match_info = _parse_match_info(raw_match_info)
score = 0.0
p, c = match_info[:2]
weights = get_weights(c, raw_weights)
# matchinfo X value corresponds to, for each phrase in the search query, a
# list of 3 values for each column in the search table.
# So if we have a two-phrase search query and three columns of data, the
# following would be the layout:
# p0 : c0=[0, 1, 2], c1=[3, 4, 5], c2=[6, 7, 8]
# p1 : c0=[9, 10, 11], c1=[12, 13, 14], c2=[15, 16, 17]
for phrase_num in range(p):
phrase_info_idx = 2 + (phrase_num * c * 3)
for col_num in range(c):
weight = weights[col_num]
if not weight:
continue
col_idx = phrase_info_idx + (col_num * 3)
# The idea is that we count the number of times the phrase appears
# in this column of the current row, compared to how many times it
# appears in this column across all rows. The ratio of these values
# provides a rough way to score based on "high value" terms.
row_hits = match_info[col_idx]
all_rows_hits = match_info[col_idx + 1]
if row_hits > 0:
score += weight * (float(row_hits) / all_rows_hits)
return -score
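# A hedged usage sketch (table and column names are assumptions): 'rank' is
# registered as the SQL function 'fts_rank' by SqliteExtDatabase above (when
# the C extensions are not used) and consumes matchinfo() in its default
# 'pcx' format. Scores are negated, so ascending order puts the best matches
# first.
#
#   SELECT docid, fts_rank(matchinfo(document_tbl)) AS score
#   FROM document_tbl
#   WHERE document_tbl MATCH 'search terms'
#   ORDER BY score;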
# Okapi BM25 ranking implementation (FTS4 only).
def bm25(raw_match_info, *args):
"""
Usage:
# Format string *must* be pcnalx
    # Second parameter to bm25 specifies the index of the column on
    # the table being queried.
bm25(matchinfo(document_tbl, 'pcnalx'), 1) AS rank
"""
match_info = _parse_match_info(raw_match_info)
K = 1.2
B = 0.75
score = 0.0
P_O, C_O, N_O, A_O = range(4) # Offsets into the matchinfo buffer.
term_count = match_info[P_O] # n
col_count = match_info[C_O]
total_docs = match_info[N_O] # N
L_O = A_O + col_count
X_O = L_O + col_count
weights = get_weights(col_count, args)
for i in range(term_count):
for j in range(col_count):
weight = weights[j]
if weight == 0:
continue
x = X_O + (3 * (j + i * col_count))
term_frequency = float(match_info[x]) # f(qi, D)
docs_with_term = float(match_info[x + 2]) # n(qi)
# log( (N - n(qi) + 0.5) / (n(qi) + 0.5) )
idf = math.log(
(total_docs - docs_with_term + 0.5) /
(docs_with_term + 0.5))
if idf <= 0.0:
idf = 1e-6
doc_length = float(match_info[L_O + j]) # |D|
avg_length = float(match_info[A_O + j]) or 1. # avgdl
ratio = doc_length / avg_length
num = term_frequency * (K + 1)
b_part = 1 - B + (B * ratio)
denom = term_frequency + (K * b_part)
pc_score = idf * (num / denom)
score += (pc_score * weight)
return -score
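# A hedged usage sketch expanding on the docstring above (table name is an
# assumption): SqliteExtDatabase registers this function as 'fts_bm25' when
# the C extensions are not used; matchinfo() must use the 'pcnalx' format and
# any trailing arguments act as per-column weights (see get_weights).
#
#   SELECT docid,
#          fts_bm25(matchinfo(document_tbl, 'pcnalx'), 1.0, 0.5) AS score
#   FROM document_tbl
#   WHERE document_tbl MATCH 'search terms'
#   ORDER BY score;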
def _json_contains(src_json, obj_json):
stack = []
try:
stack.append((json.loads(obj_json), json.loads(src_json)))
except:
# Invalid JSON!
return False
while stack:
obj, src = stack.pop()
if isinstance(src, dict):
if isinstance(obj, dict):
for key in obj:
if key not in src:
return False
stack.append((obj[key], src[key]))
elif isinstance(obj, list):
for item in obj:
if item not in src:
return False
elif obj not in src:
return False
elif isinstance(src, list):
if isinstance(obj, dict):
return False
elif isinstance(obj, list):
try:
for i in range(len(obj)):
stack.append((obj[i], src[i]))
except IndexError:
return False
elif obj not in src:
return False
elif obj != src:
return False
return True
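# A hedged usage sketch (table and column names are assumptions): when
# constructed with json_contains=True, SqliteExtDatabase registers this
# function as 'json_contains', so containment checks can run in SQL:
#
#   SELECT * FROM kv
#   WHERE json_contains(kv.metadata, '{"status": "active"}');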
|
mit
| -35,110,409,590,999,170
| 33.208565
| 79
| 0.568236
| false
| 3.958976
| false
| false
| false
|
arunkgupta/gramps
|
gramps/gen/utils/file.py
|
1
|
9733
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
File and folder related utility functions
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
import sys
import locale
import shutil
import logging
LOG = logging.getLogger(".gen.utils.file")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..constfunc import win, mac
from ..const import TEMP_DIR, USER_HOME
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_NEW_NAME_PATTERN = '%s%sUntitled_%d.%s'
#-------------------------------------------------------------------------
#
# Functions
#
#-------------------------------------------------------------------------
def find_file( filename):
# try the filename we got
try:
fname = filename
if os.path.isfile( filename):
return( filename)
except:
pass
# Build list of alternate encodings
encodings = set()
#Darwin returns "mac roman" for preferredencoding, but since it
#returns "UTF-8" for filesystemencoding, and that's first, this
#works.
for enc in [sys.getfilesystemencoding, locale.getpreferredencoding]:
try:
            encodings.add(enc())
except:
pass
encodings.add('UTF-8')
encodings.add('ISO-8859-1')
for enc in encodings:
try:
fname = filename.encode(enc)
if os.path.isfile( fname):
return fname
except:
pass
# not found
return ''
def find_folder( filename):
# try the filename we got
try:
fname = filename
if os.path.isdir( filename):
return( filename)
except:
pass
# Build list of alternate encodings
try:
encodings = [sys.getfilesystemencoding(),
locale.getpreferredencoding(),
'UTF-8', 'ISO-8859-1']
except:
encodings = [sys.getfilesystemencoding(), 'UTF-8', 'ISO-8859-1']
encodings = list(set(encodings))
for enc in encodings:
try:
fname = filename.encode(enc)
if os.path.isdir( fname):
return fname
except:
pass
# not found
return ''
def get_unicode_path_from_file_chooser(path):
"""
Return the Unicode version of a path string.
:type path: str
:param path: The path to be converted to Unicode
:rtype: unicode
:returns: The Unicode version of path.
"""
# make only unicode of path of type 'str'
if not (isinstance(path, str)):
return path
if win():
# in windows filechooser returns officially utf-8, not filesystemencoding
try:
return unicode(path)
except:
LOG.warn("Problem encountered converting string: %s." % path)
return unicode(path, sys.getfilesystemencoding(), errors='replace')
else:
try:
return unicode(path, sys.getfilesystemencoding())
except:
LOG.warn("Problem encountered converting string: %s." % path)
return unicode(path, sys.getfilesystemencoding(), errors='replace')
def get_unicode_path_from_env_var(path):
"""
Return the Unicode version of a path string.
:type path: str
:param path: The path to be converted to Unicode
:rtype: unicode
:returns: The Unicode version of path.
"""
# make only unicode of path of type 'str'
if not (isinstance(path, str)):
return path
if win():
        # In Windows a path/filename returned from an environment variable is in filesystemencoding
try:
new_path = unicode(path, sys.getfilesystemencoding())
return new_path
except:
LOG.warn("Problem encountered converting string: %s." % path)
return unicode(path, sys.getfilesystemencoding(), errors='replace')
else:
try:
return unicode(path)
except:
LOG.warn("Problem encountered converting string: %s." % path)
return unicode(path, sys.getfilesystemencoding(), errors='replace')
def get_new_filename(ext, folder='~/'):
ix = 1
while os.path.isfile(os.path.expanduser(_NEW_NAME_PATTERN %
(folder, os.path.sep, ix, ext))):
ix = ix + 1
return os.path.expanduser(_NEW_NAME_PATTERN % (folder, os.path.sep, ix, ext))
def get_empty_tempdir(dirname):
""" Return path to TEMP_DIR/dirname, a guaranteed empty directory
makes intervening directories if required
fails if _file_ by that name already exists,
or for inadequate permissions to delete dir/files or create dir(s)
"""
dirpath = os.path.join(TEMP_DIR,dirname)
if os.path.isdir(dirpath):
shutil.rmtree(dirpath)
os.makedirs(dirpath)
dirpath = get_unicode_path_from_env_var(dirpath)
return dirpath
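# A hedged example (the directory name is hypothetical): request a clean
# scratch directory below TEMP_DIR, use it, then discard it with rm_tempdir.
#
#   workdir = get_empty_tempdir('report_export')
#   # ... write files into workdir ...
#   rm_tempdir(workdir)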
def rm_tempdir(path):
"""Remove a tempdir created with get_empty_tempdir"""
if path.startswith(TEMP_DIR) and os.path.isdir(path):
shutil.rmtree(path)
def relative_path(original, base):
"""
Calculate the relative path from base to original, with base a directory,
and original an absolute path
On problems, original is returned unchanged
"""
if not os.path.isdir(base):
return original
#original and base must be absolute paths
if not os.path.isabs(base):
return original
if not os.path.isabs(original):
return original
original = os.path.normpath(original)
base = os.path.normpath(base)
# If the db_dir and obj_dir are on different drives (win only)
# then there cannot be a relative path. Return original obj_path
(base_drive, base) = os.path.splitdrive(base)
(orig_drive, orig_name) = os.path.splitdrive(original)
if base_drive.upper() != orig_drive.upper():
return original
# Starting from the filepath root, work out how much of the filepath is
# shared by base and target.
base_list = (base).split(os.sep)
target_list = (orig_name).split(os.sep)
# make sure '/home/person' and 'c:/home/person' both give
# list ['home', 'person']
base_list = filter(None, base_list)
target_list = filter(None, target_list)
i = -1
for i in range(min(len(base_list), len(target_list))):
        if base_list[i] != target_list[i]: break
else:
#if break did not happen we are here at end, and add 1.
i += 1
rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
return os.path.join(*rel_list)
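# A hedged worked example of the calculation above (paths are hypothetical):
#
#   relative_path('/home/me/genea/media/tomb.png', '/home/me/genea')
#   -> 'media/tomb.png'
#   relative_path('/home/me/genea/media/tomb.png', '/home/me/other')
#   -> '../genea/media/tomb.png'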
def media_path(db):
"""
Given a database, return the mediapath to use as basedir for media
"""
mpath = db.get_mediapath()
if mpath is None:
#use home dir
mpath = USER_HOME
return mpath
def media_path_full(db, filename):
"""
    Given a database and the filename of a media object, return the media
    filename in full form, eg 'graves/tomb.png' becomes
    '/home/me/genea/graves/tomb.png'
"""
if os.path.isabs(filename):
return filename
mpath = media_path(db)
return os.path.join(mpath, filename)
def search_for(name):
if name.startswith( '"' ):
name = name.split('"')[1]
else:
name = name.split()[0]
if win():
for i in os.environ['PATH'].split(';'):
fname = os.path.join(i, name)
if os.access(fname, os.X_OK) and not os.path.isdir(fname):
return 1
if os.access(name, os.X_OK) and not os.path.isdir(name):
return 1
else:
for i in os.environ['PATH'].split(':'):
fname = os.path.join(i, name)
if os.access(fname, os.X_OK) and not os.path.isdir(fname):
return 1
return 0
def fix_encoding(value, errors='strict'):
# The errors argument specifies the response when the input string can't be
# converted according to the encoding's rules. Legal values for this
# argument are 'strict' (raise a UnicodeDecodeError exception), 'replace'
# (add U+FFFD, 'REPLACEMENT CHARACTER'), or 'ignore' (just leave the
# character out of the Unicode result).
if not isinstance(value, unicode):
try:
return unicode(value)
except:
try:
if mac():
codeset = locale.getlocale()[1]
else:
codeset = locale.getpreferredencoding()
except:
codeset = "UTF-8"
return unicode(value, codeset, errors)
else:
return value
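# A hedged example of the 'errors' argument described above (the byte string
# is hypothetical): decode with the detected codeset but substitute U+FFFD for
# undecodable bytes instead of raising.
#
#   name = fix_encoding('Jos\xe9', errors='replace')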
|
gpl-2.0
| 5,058,031,207,568,249,000
| 31.228477
| 96
| 0.577109
| false
| 4.152304
| false
| false
| false
|
e-koch/clean_masks
|
clean_mask_construct.py
|
1
|
13421
|
import numpy as np
import scipy.ndimage as nd
from signal_id import RadioMask, Noise
from radio_beam import Beam
import astropy.units as u
from astropy.io import fits
from astropy.extern import six
import astropy
from skimage.morphology import reconstruction
'''
Routines for constructing a robust clean mask.
1) Pick two sigma levels, then dilate the higher into the lower.
2) Pick two sigma levels, remove any components in the lower cut if it
doesn't contain any pixels in the higher cut mask.
'''
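# A hedged usage sketch of the classes below (file names and cut levels are
# assumptions, not from the original source):
#
#   cube = fits.getdata('dirty_cube.fits')
#   mask = CleanMask(cube, low_cut=2, high_cut=5)
#   mask.make_mask(method="dilate")
#   mask.save_to_fits('clean_mask.fits',
#                     header=fits.getheader('dirty_cube.fits'))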
class CleanMask(object):
"""
Creates a robust CLEAN mask.
Parameters
----------
cube : numpy.ndarray or astropy PrimaryHDU
low_cut : float or int
Lower sigma cut.
high_cut : float or int
Higher sigma cut.
beam : Beam
Object defining the beam.
pbcoverage : numpy.ndarray
Defines the beam coverage over the image for mosaics.
pb_thresh : float
Defines a threshold between 0 and 1 to remove regions with low beam
coverage in the image.
"""
def __init__(self, cube, low_cut, high_cut, beam=None, pbcoverage=None,
pb_thresh=0.7, iteraxis=0):
super(CleanMask, self).__init__()
self._cube = cube
self.low_cut = low_cut
self.high_cut = high_cut
if isinstance(beam, Beam):
self.beam = beam
elif beam is None:
self.beam = None
else:
raise TypeError("beam must be a Beam object or None.")
if pbcoverage is not None:
if isinstance(pbcoverage, six.string_types):
pbcoverage = fits.getdata(pbcoverage)
if not isinstance(pbcoverage, np.ndarray):
raise TypeError("pbcoverage must be a numpy array.")
if pb_thresh < 0.0 or pb_thresh > 1.0:
raise Warning("pb_thresh must be between 0 and 1.")
self.pb_mask = pbcoverage > pb_thresh
self.pb_flag = True
else:
self.pb_mask = np.ones_like(cube, dtype=bool)
self.pb_flag = False
if iteraxis > len(self.cube.shape):
raise IndexError(str(iteraxis)+"is greater than the total number"
" of axes.")
self.iteraxis = iteraxis
self.restor_dims = [np.newaxis if i == 1 else slice(None)
for i in self.cube.shape]
self.restor_dims.pop(self.iteraxis)
self._low_mask = None
self._high_mask = None
self._mask = None
self._pb_applied = False
self._smoothed = False
self._method = "None"
self._pb_thresh = pb_thresh
@property
def cube(self):
return Cube(self._cube)
def make_initial_masks(self, compute_slicewise=False):
'''
Calculate the initial masks.
'''
if compute_slicewise or self.cube.huge_flag:
sums = 0.0
num_finite = 0
for plane in self.cube.generate_slice(self.iteraxis):
sums += np.nansum(plane)
num_finite += np.isfinite(plane).sum()
mean = sums / num_finite
var = 0.0
for plane in self.cube.generate_slice(self.iteraxis):
var += np.nansum(np.power(plane - mean, 2), axis=None)
std = np.sqrt(var / (num_finite - 1))
print "Slice"
print mean
print std
low_thresh = mean + self.low_cut * std
high_thresh = mean + self.high_cut * std
self._low_mask = np.zeros(self.cube.shape, dtype=bool)
self._high_mask = np.zeros(self.cube.shape, dtype=bool)
for slices in self.cube.generate_slice(self.iteraxis,
return_slice=False):
self._low_mask[slices] = self.cube[slices] > low_thresh
self._high_mask[slices] = self.cube[slices] > high_thresh
else:
mean = np.nanmean(self.cube[:])
std = np.nanstd(self.cube[:])
print "Full"
print mean
print std
low_thresh = mean + self.low_cut * std
high_thresh = mean + self.high_cut * std
self._low_mask = self.cube > low_thresh
self._high_mask = self.cube > high_thresh
@property
def low_mask(self):
return self._low_mask
@property
def high_mask(self):
return self._high_mask
@property
def mask(self):
return self._mask
@property
def method(self):
return self._method
def to_RadioMask(self, which_mask='final'):
        if which_mask == 'final':
            return RadioMask(self._mask, wcs=None)  # Load in WCS somehow
        elif which_mask == 'low':
            return RadioMask(self._low_mask, wcs=None)  # Load in WCS somehow
        elif which_mask == 'high':
            return RadioMask(self._high_mask, wcs=None)  # Load in WCS somehow
        else:
            raise TypeError("which_mask must be 'final', 'low', or 'high'.")
def dilate_into_low(self, verbose=False):
'''
Dilates the high mask into the low using morphological reconstruction.
'''
dilate_struct = nd.generate_binary_structure(2, 3)
for i, slices in enumerate(self.cube.generate_slice(self.iteraxis,
return_slice=False)):
# Skip empty channels
            if not self._high_mask[slices].any():
                continue
if verbose:
print "Iteration %s of %s" % (str(i+1),
self.cube.shape[self.iteraxis])
self.high_mask[slices] = \
reconstruction(self.high_mask[slices].squeeze(),
self.low_mask[slices].squeeze(),
selem=dilate_struct)[self.restor_dims]
self._mask = self._high_mask
self._method = "dilate"
def remove_high_components(self, min_pix=10, beam_check=False,
pixscale=None, verbose=False):
'''
Remove components in the low mask which are not
contained in the high mask.
The criteria is set by min_pix, or is based off of the beam area.
Note that if min_pix < beam area, min_pix has no effect.
'''
# 8-connectivity
connect = np.ones((3, 3))
# Objects must be at least the beam area to be kept.
if beam_check:
# Remove this when WCS object is added.
if pixscale is None:
raise TypeError("pixscale must be specified to use beamarea")
major = self.major.to(u.deg).value/pixscale
minor = self.minor.to(u.deg).value/pixscale
# Round down by default?
# Should this be made into an optional input?
beam_pix_area = np.floor(np.pi * major * minor)
else:
beam_pix_area = 0
for i, slices in enumerate(self.cube.generate_slice(self.iteraxis,
return_slice=False)):
if verbose:
print "Iteration %s of %s" % (str(i+1),
self.cube.shape[self.iteraxis])
# Skip empty channels
            if not self.high_mask[slices].any():
                continue
low_labels, low_num = nd.label(self._low_mask[slices], connect)
for j in range(1, low_num+1):
low_pix = zip(*np.where(low_labels == j))
high_pix = zip(*np.where(self._high_mask[slices] > 0))
# Now check for overlap
matches = list(set(low_pix) & set(high_pix))
if len(matches) >= min_pix:
continue
if len(matches) > beam_pix_area:
continue
x_pos = [x for x, y in low_pix]
y_pos = [y for x, y in low_pix]
# If less than match threshold, remove region in the low mask
self._low_mask[slices][x_pos, y_pos] = 0
self._mask = self._low_mask
self._method = "remove small"
def _smooth_it(self, kern_size='beam', pixscale=None):
'''
Apply median filter to smooth the edges of the mask.
'''
        if kern_size == 'beam':
if pixscale is None:
raise TypeError("pixscale must be specified to use beamarea")
footprint = self.beam.as_tophat_kernel(pixscale)
elif isinstance(kern_size, float) or isinstance(kern_size, int):
major = kern_size
minor = kern_size
footprint = np.ones((major, minor))
else:
Warning("kern_size must be 'beam', or a float or integer.")
from scipy.ndimage import median_filter
for i, slices in enumerate(self.cube.generate_slice(self.iteraxis,
return_slice=False)):
self._mask[slices] = \
median_filter(self._mask[slices],
footprint=footprint)[self.restor_dims]
self._smoothed = True
def apply_pbmask(self):
'''
Apply the given primary beam coverage mask.
'''
if self.pb_flag:
self._mask *= self.pb_mask
self._pb_applied = True
def save_to_fits(self, filename, header=None, append_comments=True):
'''
Save the final mask as a FITS file. Optionally append the parameters
used to create the mask.
'''
if header is not None and append_comments:
header["COMMENT"] = "Settings used in CleanMask: "
header["COMMENT"] = "Mask created with method "+self.method
if self._smoothed:
header["COMMENT"] = "Mask smoothed with beam kernel."
if self.pb_flag:
header["COMMENT"] = \
"Mask corrected for pb coverage with a threshold of " + \
str(self._pb_thresh)
# Set BITPIX to 8 (unsigned integer)
header["BITPIX"] = 8
hdu = fits.PrimaryHDU(self.mask.astype(">i2"), header=header)
hdu.writeto(filename)
def make_mask(self, method="dilate", compute_slicewise=False,
smooth=False, kern_size='beam', pixscale=None,
verbose=False):
self.make_initial_masks(compute_slicewise=compute_slicewise)
if method == "dilate":
self.dilate_into_low(verbose=verbose)
elif method == "remove small":
self.remove_high_components(pixscale=pixscale, verbose=verbose)
else:
raise TypeError("method must be 'dilate' or 'remove small'.")
if smooth:
self._smooth_it(kern_size=kern_size, pixscale=pixscale)
self.apply_pbmask()
class Cube(object):
"""
Cube attempts to handle numpy arrays and FITS HDUs transparently. This
is useful for massive datasets, in particular. The data is loaded in only
for the requested slice.
It is certainly *NOT* robust or complete, but handles what is needed for
creating CLEAN masks.
"""
def __init__(self, cube, huge_flag=None, huge_thresh=5e9,
squeeze=True):
self.cube = cube
if huge_flag is not None:
self.huge_flag = huge_flag
else:
self.huge_flag = self.size > huge_thresh
@property
def cube(self):
return self._cube
@cube.setter
def cube(self, input_cube):
if isinstance(input_cube, six.string_types):
input_cube = self._load_fits(input_cube)
is_array = isinstance(input_cube, np.ndarray)
is_hdu = isinstance(input_cube, astropy.io.fits.hdu.image.PrimaryHDU)
if not is_array and not is_hdu:
raise TypeError("cube must be a numpy array or an astropy "
"PrimaryHDU. Input was of type " +
str(type(input_cube)))
self._cube = input_cube
def __getitem__(self, view):
if self.is_hdu:
return self.cube.data[view]
else:
return self.cube[view]
def _load_fits(self, fitsfile, ext=0):
return fits.open(fitsfile)[ext]
def _is_hdu(self):
if hasattr(self.cube, 'header'):
return True
return False
@property
def is_hdu(self):
return self._is_hdu()
@property
def shape(self):
return self.cube.shape
@property
def size(self):
return self.cube.size
def close(self):
'''
If an HDU, close it.
'''
if self.is_hdu:
self.cube.close()
def generate_slice(self, iteraxis, return_slice=True):
slices = [slice(None)] * len(self.shape)
for i in xrange(self.shape[iteraxis]):
slices[iteraxis] = i
if return_slice:
yield self[slices]
else:
yield slices
def __gt__(self, value):
return self[:] > value
def __lt__(self, value):
return self[:] < value
def __ge__(self, value):
return self[:] >= value
def __le__(self, value):
return self[:] <= value
|
mit
| -5,155,616,146,169,940,000
| 29.502273
| 81
| 0.543402
| false
| 4.08554
| false
| false
| false
|
filannim/Temporal-Footprint
|
temporal_footprint/predict.py
|
1
|
9133
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2014 Michele Filannino
#
# gnTEAM, School of Computer Science, University of Manchester.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU General Public License.
#
# author: Michele Filannino
# email: filannim@cs.man.ac.uk
#
# For details, see www.cs.man.ac.uk/~filannim/
from __future__ import division
from collections import namedtuple
from collections import defaultdict
from datetime import date as Date
import re
import sys
import os
import subprocess
import tempfile
import time
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
from scipy.stats import norm
from wikipedia2text import wikipedia_text
from properties import properties
Gaussian = namedtuple('Gaussian', ['mu', 'sigma'])
TemporalFrame = namedtuple('TemporalFrame', ['start', 'end'])
TemporalFrameResult = namedtuple('TemporalFrameResult', ['source', 'dates', 'gaussian_curve', 'optimised_gaussian_curve', 'predicted_temporal_frame', 'error'])
def HeidelTime_annotate(text):
with tempfile.NamedTemporaryFile('w+t', delete=False) as f:
name = f.name
f.write(text)
os.chdir(properties['HEIDELTIME_DIR'])
process = subprocess.Popen(['java', '-jar', 'de.unihd.dbs.heideltime.standalone.jar', name, '-l', 'ENGLISH', '-t', 'NARRATIVES'], stdout=subprocess.PIPE)
output, err = process.communicate()
os.remove(name)
os.chdir(properties['MAIN_DIR'])
return str(output)
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print '%s' % self.name
print '(%.4fs)' % (time.time() - self.tstart)
class WikipediaPage(object):
def __init__(self, title, gold_start=None, gold_end=None):
self.title = title.strip()
self.text = re.sub(r'[<>]', '', wikipedia_text(title.strip(), fullURL=True)['text'])
self.HeidelTime_text = HeidelTime_annotate(self.text)
self.word_count = len(self.text.split())
self.DDDD_density = len(re.findall(r'[12][\d]{3}', self.text)) / len(self.text.split())
self.DDDD_sequences = len(re.findall(r'[12][\d]{3}', self.text))
self.temporal_frame = TemporalFrame(0.0, 0.0)
if gold_start and gold_end:
self.temporal_frame = TemporalFrame(float(gold_start), float(gold_end))
def __str__(self):
text = 'TEXT:' + self.text[0:100] + '\n'
text += '# DDDD sequences:' + str(len(re.findall(r'[12][\d]{3}', self.text))) + '\n'
        text += '# characters :' + str(len(self.text)) + '\n'
        text += '# words :' + str(len(self.text.split())) + '\n'
        text += '# DDDD density :' + str(len(re.findall(r'[12][\d]{3}', self.text)) / len(self.text.split()))
return text
class Predictor(object):
def __init__(self, Person, outlier_ray=7.9, gaussian_a=1.6, gaussian_b=-10):
self.person = Person
self.outlier_ray = outlier_ray
self.gaussian_a = gaussian_a
self.gaussian_b = gaussian_b
self.extraction_functions = (self.__extract_DDDD_dates, self.__extract_HeidelTime_dates)
#self.extraction_functions = (self.__extract_Baseline_dates, self.__extract_BaselineFiltered_dates, self.__extract_DDDD_dates, self.__extract_HeidelTime_dates)
self.results = self.__compute()
def __compute(self):
results = []
for function in self.extraction_functions:
source = re.findall(r'extract_([A-Za-z]+)_dates', str(function))[0]
results.append(self.__predict(source, function, self.outlier_ray, self.gaussian_a, self.gaussian_b))
return results
def __predict(self, source, function, outlier_ray, gaussian_a=1., gaussian_b=0.):
if source == 'Baseline':
dates = function(self.person.text)
predicted_temporal_frame = TemporalFrame(np.amin(dates), np.amax(dates))
error = self.__compute_error(self.person.temporal_frame, predicted_temporal_frame)
return TemporalFrameResult(source, dates, Gaussian(0,1), Gaussian(0,1), predicted_temporal_frame, error)
if source == 'BaselineFiltered':
try:
dates = function(self.person.text)
dates_filtered = self.__reject_outliers(dates, outlier_ray)
predicted_temporal_frame = TemporalFrame(np.amin(dates_filtered), np.amax(dates_filtered))
error = self.__compute_error(self.person.temporal_frame, predicted_temporal_frame)
return TemporalFrameResult(source, dates, Gaussian(0,1), Gaussian(0,1), predicted_temporal_frame, error)
except ValueError:
return TemporalFrameResult(source, dates, Gaussian(0,1), Gaussian(0,1), TemporalFrame(1000, Date.today().year), 1.0)
elif source == 'DDDD':
dates = function(self.person.text)
elif source == 'HeidelTime':
dates = function(self.person.HeidelTime_text)
else:
raise Exception('Function ' + source + 'not found!')
dates_filtered = self.__reject_outliers(dates, outlier_ray)
gaussian_curve = Gaussian._make(self.__normal_fit(dates_filtered))
optimised_gaussian_curve = Gaussian(gaussian_curve.mu+gaussian_b, gaussian_curve.sigma*gaussian_a)
predicted_temporal_frame = TemporalFrame(optimised_gaussian_curve.mu - optimised_gaussian_curve.sigma, optimised_gaussian_curve.mu + optimised_gaussian_curve.sigma)
error = self.__compute_error(self.person.temporal_frame, predicted_temporal_frame)
return TemporalFrameResult(source, dates, gaussian_curve, optimised_gaussian_curve, predicted_temporal_frame, error)
def __reject_outliers(self, dates, outlier_ray = 2.):
d = np.abs(dates - np.median(dates))
mdev = np.median(d)
s = d/mdev if mdev else 0
try:
r = dates[s<outlier_ray]
except IndexError:
r = np.array([])
if type(r) != np.ndarray:
return np.array([r])
else:
return r
def __normal_fit(self, dates):
y = map(float, dates) #y = [float(d) for d in dates]
return norm.fit(y) # returns (mu, sigma)
def __compute_error(self, gold_frame, predicted_frame):
upper_bound = np.amax((gold_frame.start, gold_frame.end, predicted_frame.start, predicted_frame.end)) #can be more elegantly rewritten
lower_bound = np.amin((gold_frame.start, gold_frame.end, predicted_frame.start, predicted_frame.end)) #can be more elegantly rewritten
union_frame = (upper_bound - lower_bound) + 1
try:
overlap = len(set(range(int(gold_frame.start), int(gold_frame.end)+1)) & set(range(int(predicted_frame.start), int(predicted_frame.end)+1)))#can I write something more NumPy-ish?
return 1 - (overlap/union_frame)
except ValueError:
return 1
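    # A hedged worked example of the error above (numbers are illustrative):
    # a gold frame 1900-1950 and a predicted frame 1940-1980 span 1900-1980
    # (union of 81 years) and share 1940-1950 (overlap of 11 years), giving
    # error = 1 - 11/81 ~= 0.864.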
def __extract_Baseline_dates(self, text):
result = np.array([float(y) for y in re.findall(r'[12][\d]{3}', text)])
if len(result)<2:
return np.array([1000,2014])
else:
return result
def __extract_BaselineFiltered_dates(self, text):
result = np.array([float(y) for y in re.findall(r'[12][\d]{3}', text)])
if len(result)<2:
return np.array([1000,2014])
else:
return result
def __extract_DDDD_dates(self, text):
result = np.array([float(y) for y in re.findall(r'[12][\d]{3}', text)])
if len(result)<2:
return np.array([1000,2014])
else:
return result
def __extract_HeidelTime_dates(self, text):
try:
dates = re.findall('value=\"([^\"]+)\"', text)
dates = [e[0:4] for e in dates if len(e)==4]
dates = [int(y) for y in dates if y.isdigit()]
return np.array(dates)
except:
return np.array([1000,2014])
def plot(self):
plt.close('all')
fig, (axarr) = plt.subplots(len(self.extraction_functions))
for i, result in enumerate(self.results):
try:
n, bins, patches = axarr[i].hist(result.dates, 100, normed=1, facecolor='blue', alpha=0.75)
axarr[i].plot(bins, mlab.normpdf(bins, result.gaussian_curve.mu, result.gaussian_curve.sigma), 'r--', linewidth=2)
axarr[i].axvspan(self.person.temporal_frame.start, self.person.temporal_frame.end, color='blue', alpha=0.3)
axarr[i].axvspan(result.predicted_temporal_frame.start, result.predicted_temporal_frame.end, color='red', alpha=0.3)
next_year = int(Date.today().year+1)
if i==0:
axarr[0].set_title(self.person.title.replace('_', ' ') + ' (' + str(int(self.person.temporal_frame.start)) + '-' + str(int(self.person.temporal_frame.end)) + ')\n' + result.source + ' prediction [' + str(int(result.predicted_temporal_frame.start)) + '-' + str(int(result.predicted_temporal_frame.end)) + '], E = ' + str(np.around(result.error, 4)))
else:
axarr[i].set_title(result.source + ' prediction [' + str(int(result.predicted_temporal_frame.start)) + '-' + str(int(result.predicted_temporal_frame.end)) + '], E = ' + str(np.around(result.error, 4)))
axarr[i].set_ylabel('freq')
axarr[i].yaxis.set_ticklabels([])
axarr[i].set_xticks(np.arange(1000,next_year, next_year/50))
axarr[i].set_xlim(1000,next_year)
print result.source, str(np.around(result.error, 4))
except:
continue
axarr[i].set_xlabel('Years (0 - ' + str(next_year) + ')')
plt.show(block=False)
#plt.savefig('pictures/' + self.person.title + '.png', dpi=300)
raw_input('Press Any Key To Exit')
def predict(title, start=None, end=None):
prediction = Predictor(WikipediaPage(title, gold_start=start, gold_end=end))
return prediction
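# A hedged usage sketch (the Wikipedia title and gold dates are illustrative):
#
#   prediction = predict('Ludwig_van_Beethoven', start=1770, end=1827)
#   for result in prediction.results:
#       print result.source, result.predicted_temporal_frame, result.error
#   prediction.plot()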
|
apache-2.0
| -1,198,312,042,371,473,000
| 41.483721
| 353
| 0.688711
| false
| 2.952797
| false
| false
| false
|
ibelikov/jimmy
|
jimmy/modules/throttle/tests/test_throttle.py
|
1
|
15691
|
# -*- coding: utf-8 -*-
# Copyright 2016 Mirantis, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import mock
import mockfs
import os
import pytest
import sys
import jsonschema
from jimmy import cli
from mock import call
from click.testing import CliRunner
from jimmy.lib.common import yaml_reader
from jimmy.tests import base
modules_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
jimmy_dir = os.path.dirname(modules_dir)
throttle_schema_path = os.path.join(modules_dir, 'throttle', 'resources', 'schema.yaml')
jenkins_yaml_path = os.path.join(jimmy_dir, 'sample', 'input', 'jenkins.yaml')
class TestThrottleModule(base.TestCase):
def setup_method(self, method):
self.runner = CliRunner()
def teardown_method(self, method):
mockfs.restore_builtins()
@mock.patch('jimmy.lib.core.load_py_modules')
@mock.patch('subprocess.call')
def test_cli_call(self, mock_subp, mock_modules):
with open(throttle_schema_path, 'r') as f:
mock_throttle_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({os.path.join(jimmy_dir, 'lib', 'schema.yaml'): self.jimmy_schema,
os.path.join(jimmy_dir, 'jimmy.yaml'): self.mock_jimmy_yaml,
throttle_schema_path: mock_throttle_schema,
jenkins_yaml_path: '\n'.join(
[
'jenkins:',
' throttle:',
' categories:',
' - category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1',
' - throttled_node_label: slave-label2',
' max_concurrent_per_labeled: 1',
' - category_name: category2',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0'
])
})
sys.path.insert(0, modules_dir)
import throttle
import read_source
sys.path.pop(0)
mock_modules.return_value = [throttle, read_source]
os.chdir(jimmy_dir)
self.runner.invoke(cli)
calls = [call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'throttle/resources/jenkins.groovy',
'clearCategories'],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'throttle/resources/jenkins.groovy',
'makeThrottleCategory',
'category1', '1', '0', 'slave-label1,slave-label2', '1,1'],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'throttle/resources/jenkins.groovy',
'makeThrottleCategory',
'category2', '1', '0', '', ''],
shell=False)
]
mock_subp.assert_has_calls(calls, any_order=True)
assert 3 == mock_subp.call_count, "subprocess call should be equal to 3"
class TestThrottleSchema(object):
def setup_method(self, method):
with open(throttle_schema_path, 'r') as f:
mock_throttle_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({throttle_schema_path: mock_throttle_schema})
self.schema = yaml_reader.read(throttle_schema_path)
def teardown_method(self, method):
mockfs.restore_builtins()
def test_valid_repo_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_validation_fail_if_category_name_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: 123',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_max_total_concurrent_builds_is_not_num(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: test',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'number'"
def test_validation_fail_if_max_concurrent_builds_per_node_is_not_num(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: test',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'number'"
def test_validation_fail_if_throttled_node_label_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: 123',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_max_concurrent_per_labeled_is_not_num(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'number'"
def test_password_validation_fail_for_category_name_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'category_name' is a required property"
def test_password_validation_fail_for_max_total_conc_builds_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'max_total_concurrent_builds' is a required property"
def test_password_validation_fail_for_max_conc_builds_per_node_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'max_concurrent_builds_per_node' is a required property"
def test_password_validation_fail_for_throttled_node_label_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'throttled_node_label' is a required property"
def test_password_validation_fail_for_max_concurrent_per_labeled_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'max_concurrent_per_labeled' is a required property"
def test_validation_fail_if_categories_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_if_max_per_labeled_node_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_for_categories_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
def test_validation_fail_for_max_per_labeled_node_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
|
apache-2.0
| 8,527,842,525,457,008,000
| 43.450425
| 103
| 0.548467
| false
| 4.003828
| true
| false
| false
|
ipittau/ldutil
|
ldutil.py
|
1
|
13182
|
#!/usr/bin/python
#
# ldutil helps you to manage library dependencies on a filesystem
#
# Copyright (C) 2014 Ilario Pittau (ilariopittau[at]gmail[dot]com)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import getopt, sys, subprocess, os, fnmatch, pickle, re, hashlib
from stat import *
def usage():
print "Usage: ldutil [-p <pickle_file>] [-o <output_file>] [-i <input_file> [c <check_file>] [s <search_file>]] [d <search_dir>] [vnml] [h]"
print " -o output_file : recurse the search_dir for reverse tree and write the tree in the file"
print " -p pickle_file : use pickle function to dump the lib_list dependency tree"
print " -i input_file : use the input_file as tree for the binaries, the file must be generated with the pickle feature"
print " -c check_file : print the list of libs that needs check_file"
print " -s search_file : print all occurences of search_file"
print " -d search_dir : use this dir as base for find and recurse"
print " -n : print all binaries that nobody use"
print " -m : print all binaries that have a unsatisfied dependency"
print " -l : manage symbolic links (EXPERIMENTAL)"
print " -v : verbose_flag mode on"
print "Examples:"
print " ldutil -d /usr/lib -p /tmp/pickle"
print " ldutil -d /usr/lib -i /tmp/pickle -s libc.so"
print " ldutil -d /usr/lib -i /tmp/pickle -n"
#Search a lib starting from search_dir
#@param filename: the file to search on the search_dir
#@param search_dir: the directory to use as root
#@param link_enable_flag: if true returns the link otherwise the linked file
#@return the file if found, the linked file if flag is False, an empty string if not found
def findout(filename, search_dir, link_enable_flag=False):
#print "finding " + filename + "..."
find_list = []
for root, dirs, files in os.walk(search_dir):
for basename in fnmatch.filter(files, filename):
found_lib = os.path.join(root, basename)
mode = os.lstat(found_lib).st_mode
if (link_enable_flag == False):
if S_ISLNK(mode):
refound = os.path.dirname(found_lib) +"/"+ os.readlink(found_lib)
#print "found " + filename + " -> " + refound
return refound
#print "found " + found_lib
return found_lib
return ""
#Function that analyze a binary and its dependencies recursively
#@param current: file to be analyzed, uses readelf -d to check dependecies, current is fullpath
#@param father: the parent binary for the recursive step; "nobody" for the first step
#@return the list of dependencies of the current binary
def analyze(current, father):
#print "analyze " + current
lib_basename = os.path.basename(current)
if lib_basename in lib_list:
return []
lib_list[lib_basename] = []
#readelf regExp
re_readelf = re.compile('\(NEEDED\)[\s]+[A-Za-z\s]+: \[(.+)\]')
try:
readelf_output = subprocess.check_output("readelf -d "+current,stderr=subprocess.STDOUT,shell=True)
except subprocess.CalledProcessError, err:
#print "readelf error " + current
lib_list.pop(lib_basename)
return []
#print "analyzed " + current
#parse the output of the readelf command
sub_lib_list = re_readelf.findall(readelf_output)
#print str(len(sub_lib_list))
#analize the used libraries
for sub_lib in sub_lib_list:
#The lib has been already analyzed, we add it and its dependencies
if sub_lib in lib_list:
lib_list[lib_basename].append(sub_lib)
lib_list[lib_basename] += lib_list[sub_lib]
else:
#Search if the dependency is satisfied
found_lib = findout(sub_lib, search_dir);
#if yes add the lib and all dependencies calling a recursive step
if (found_lib != ""):
lib_list[lib_basename].append(os.path.basename(found_lib))
lib_list[lib_basename] += analyze(found_lib, current)
#otherwise write that the dependency is unsatisfied ("miss" + name of the lib)
else:
#print sub_lib + " miss for " + lib_basename
lib_list[lib_basename].append("miss " + sub_lib)
#print lib_list[lib_basename]
#this is useful to remove duplicates
lib_list[lib_basename] = list(set(lib_list[lib_basename]))
return lib_list[lib_basename]
#function that goes deep into the directory and calls the recursive function analyze for each binary
#prints a progress counter for each file analyzed
#@param research_dir: directory to use as start point
def create_dependency_tree(research_dir):
#print subprocess.check_output("find "+search_dir, shell=True)
print "going.. " + research_dir
total_file_num = 0
file_num = 0
for root, dirs, files in os.walk(research_dir):
for new_file in files:
total_file_num = total_file_num + 1
for root, dirs, files in os.walk(research_dir):
#Analyze only files, not links
for new_file in files:
file_num = file_num + 1
print ("Analyze " + str(file_num) +"/"+ str(total_file_num) )
sys.stdout.write("\033[F")
pathname = os.path.join(root, new_file)
mode = os.lstat(pathname).st_mode
#links are skipped
if S_ISLNK(mode):
#print "link " + pathname + " " + str(mode)
pass
elif S_ISREG(mode):
# It's a file, call the recursive function to analyze it
#print "analyze " + pathname
analyze(pathname, "nobody")
else:
# Unknown file type, print a message
print 'Skipping %s' % pathname
pass
#Function to calculate the reverse tree starting from the dependency list
def reverse_analysis():
for lib in lib_list.keys():
#Add the lib to reverse_lib_list if it's not present
if lib not in reverse_lib_list:
reverse_lib_list[lib] = []
for father_lib in lib_list.keys():
if lib in lib_list[father_lib]:
reverse_lib_list[lib].append(father_lib)
def link_managment():
print "Duplicate libs"
for lib in lib_list.keys():
lib_found = findout(lib, search_dir, True)
if re.match('.+\.so.+',lib):#if is a lib
#filename = os.path.splitext(lib)
lib_no_version = lib.split(".so.")[0] + ".so"
for num_version in lib.split(".so.")[1].split("."):
fullname_lib_no_version = os.path.join(os.path.dirname(lib_found), lib_no_version)
#lib_no_version_found = findout(lib_no_version, search_dir, True)
print "Tring... " + fullname_lib_no_version
if not os.path.exists(fullname_lib_no_version) or not S_ISLNK(os.lstat(fullname_lib_no_version).st_mode):
print lib_no_version + " -> " + lib_found + " ?"
if os.path.exists(fullname_lib_no_version):
print fullname_lib_no_version + " exist, do you want replace it with the symlink?"
else:
print fullname_lib_no_version + " not exist, do you want create a new the symlink?"
response = raw_input()
if response == "y" :
print "create: " + fullname_lib_no_version
os.symlink(lib, fullname_lib_no_version)
else:
print "pass..."
else:
print fullname_lib_no_version + " link exist!"
lib_no_version += "." + num_version
#Main (this is the point where the program start)
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:vc:nd:s:o:p:ml", ["input="])
except getopt.GetoptError, err:
print "error"
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
#variable initialization
#input file prepared with the dependency tree
input_file = ""
#if enabled prints a lot of debug
verbose_flag = False
#if enabled print libs that nobody uses
nobody_flag = False
#if enabled print libs that have unsatisfied dependency
miss_flag = False
#if enabled start the interactive management of symbolic links
link_flag = False
#print the list of libs that needs check_file
check_file = ""
#Print lib_list and reverse_lib_list
print_all=False
#default fs directory to scan
search_dir="/tmp/asd"
#used to print all occurrences of the lib
search_file=""
#file output with the filter output
output_file=""
#file output with the prescanned dependency tree
pickle_file=""
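#Example invocations (the script name is a placeholder; flags follow the getopt string parsed below):
#  python deptree.py -d /usr -p tree.pkl              scan /usr and save the pickled dependency tree
#  python deptree.py -i tree.pkl -n                   load the tree and list libs nobody uses
#  python deptree.py -i tree.pkl -c libssl.so.1.0.0   list the binaries that use that lib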
try:
#option parsing
for o, a in opts:
if o == "-v":
verbose_flag = True
elif o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-i", "--input"):
input_file = a
elif o in ("-o", "--output"):
output_file = a
elif o in ("-c"):
check_file = a
elif o in ("-n"):
nobody_flag = True
elif o in ("-d"):
search_dir = a
elif o in ("-s"):
search_file = a
elif o in ("-p"):
pickle_file = a
elif o in ("-m"):
miss_flag = True
elif o in ("-l"):
link_flag = True
else:
assert False, "unhandled option"
#Contains all libs on the system and their dependencies
lib_list = dict()
#Contains all libs on the system and the bins/libs that depend on it
reverse_lib_list=dict()
#If the output file is set, create the dependency tree
if input_file == "":
create_dependency_tree(search_dir)
else:
#otherwise open the pickle file and load the lib_list
input_fd = open(input_file,"rb");
lib_list = pickle.load(input_fd);
input_fd.close()
#Open the pickle file and dump the list on it then exit
if pickle_file != "":
pickle_fd = open(pickle_file,"wb");
pickle.dump(lib_list,pickle_fd);
pickle_fd.close()
print ""
sys.exit(0)
#Perform the reverse analysis after the scan of the folder
reverse_analysis()
#Output file is used to save the output of the request feature
if output_file != "" :
output_fd = open(output_file, "w")
#MAIN SWITCH over the implemented features
    #link management is EXPERIMENTAL
if (link_flag == True):
link_managment()
elif( check_file != ""):
        #Prints the bins that use the check_file
if (check_file in reverse_lib_list):
#Print the checkfile full name path
print "This is the list of binaries that are using " + findout(check_file, search_dir)
#Print every lib in its reverse list
for lib in reverse_lib_list[check_file]:
if output_file != "" :
                    output_fd.write(findout(lib, search_dir) + "\n")
print " " + findout(lib, search_dir)
else:
print "not found"
elif (nobody_flag):
        #Prints the libraries that nobody is using and that theoretically could be deleted
print "This is the list of libraries (.so) that nobody uses:"
for k, v in reverse_lib_list.iteritems():
if len(reverse_lib_list[k]) == 0 :
#print only the .so files
if re.match('.+\.so*',k):
if output_file != "" :
output_fd.write(findout(k, search_dir) + "\n")
lib_found = findout(k, search_dir)
if lib_found == "":
print k + " not found!"
else:
print lib_found
elif (search_file):
        #Prints each occurrence of the searched file
for lib_filter in fnmatch.filter(lib_list, "*"+search_file+"*"):
search_file_found = findout(lib_filter, search_dir)
print "###################################################################"
if S_ISLNK(os.lstat(search_file_found).st_mode):
print search_file_found + " is a link"
else:
print search_file_found + " is not a link"
print lib_filter + " is used by:"
print reverse_lib_list[lib_filter]
print lib_filter + " uses:"
print lib_list[lib_filter]
elif (miss_flag):
        #Print the missing dependencies
for k, v in lib_list.iteritems():
for basename in fnmatch.filter(lib_list[k], "miss*"):
print k + " " + basename
elif (print_all):
#Print the list and reverse list
for k, v in lib_list.iteritems():
print k
print v
print "###################"
for k, v in reverse_lib_list.iteritems():
print k
print v
except KeyboardInterrupt:
print "Byee!"
#Close the file
if output_file != "" :
output_fd.close()
|
gpl-2.0
| 2,598,428,152,028,018,000
| 34.723577
| 144
| 0.616826
| false
| 3.666759
| false
| false
| false
|
nlehuby/OSM_snippets
|
navitia-to-OSM (bus routes)/route_to_html.py
|
1
|
6357
|
#-------------------------------------------------------------------------------
# Author: nlehuby
#
# Created: 28/01/2015
# Copyright: (c) nlehuby 2015
# Licence: MIT
#-------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import datetime
import csv
def prepare_osm_routes():
"""
    rebuilds the required OSM info
"""
source_csv = csv.reader(open("collecte/relations_routes.csv", "rb"))
result_list = []
for an_osm_route in source_csv :
if len(an_osm_route) < 6:
print ("il faut appeler Overpass pour récupérer les infos manquantes : TODO")
else :
result_list.append(an_osm_route)
    #sort
result_int = []
result_other= []
for a_route in result_list:
try:
int(a_route[1])
result_int.append(a_route)
except ValueError :
result_other.append(a_route)
result_int.sort(key=lambda osm_route: int(osm_route[1]))
result_other.sort(key=lambda osm_route: osm_route[1])
result_list = result_int + result_other
with open("rendu/sources/osm_parcours.csv", "wb") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for line in result_list:
writer.writerow(line)
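#Assumed column layout of the route CSV, inferred from the indexing used in to_html() below
#(the real collecte/relations_routes.csv may carry more fields):
#  [0] OSM relation id, [1] route ref/code, [2] route name,
#  [3] destination (as used in the autocomplete label), [4] number of stops, [5] network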
def prepare_navitia_routes():
"""
    rebuilds the required navitia info
"""
source_csv = csv.reader(open("rapprochement/osm_navitia.csv", "rb"))
result_list = []
for a_nav_route in source_csv :
if len(a_nav_route) < 5:
print ("il faut appeler navitia pour récupérer les infos manquantes : TODO")
else :
result_list.append(a_nav_route)
with open("rendu/sources/navitia_parcours.csv", "wb") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for line in result_list:
writer.writerow(line)
def to_html():
"""
    creates the index page that lists and links to the pages of each OSM route
"""
prepare_osm_routes()
prepare_navitia_routes()
osm_csv = csv.reader(open("rendu/sources/osm_parcours.csv", "rb"))
navitia_csv = list(csv.reader(open("rendu/sources/navitia_parcours.csv", "rb")))
autocomplete = {"parcours_osm":[]}
template_table = ''
for osm_route in osm_csv:
print osm_route[2]
        #create the object used for autocompletion
parcours = {}
parcours['value'] = osm_route [0]
parcours['label'] = "[{}] {} > {}".format(osm_route[5], osm_route[1], osm_route[3])
        rapp = [route for route in navitia_csv if route[0] == osm_route[0]] #match the OSM route with navitia
print (rapp)
if rapp != []:
print ('ok')
parcours['url'] = "bus_route.htm?osm={}&navitia={}".format(osm_route[0], rapp[0][1] )
#current_osm_route = {'id' : osm_route[0], 'name': osm_route[2], 'ref': osm_route[1], 'nb_stops': osm_route[4]}
#current_nav_route = {'id' : rapp[0][1], 'name' : rapp[0][2], 'nb_stops': rapp[0][3]}
            #add it to the index
liste_template = """
<tr>
<td> %%network%%
</td>
<td> %%route_code%%
</td>
<td>
<a href="bus_route.htm?osm=%%relation_id%%&navitia=%%navitia_id%%">%%relation_name%%</a>
</td>
<td>
%%OSM_nb_stops%%/%%navitia_nb_stops%%
</td>
<td>
<progress value="%%OSM_nb_stops%%" max="%%navitia_nb_stops%%">état de la carto de la route</progress>
</td>
<tr>
"""
liste_template = liste_template.replace("%%route_code%%", osm_route[1] )
liste_template = liste_template.replace("%%relation_id%%", osm_route[0] )
liste_template = liste_template.replace("%%relation_name%%", osm_route[2] )
liste_template = liste_template.replace("%%network%%", osm_route[5] )
liste_template = liste_template.replace("%%OSM_nb_stops%%", osm_route[4] )
liste_template = liste_template.replace("%%navitia_nb_stops%%", rapp[0][3] )
liste_template = liste_template.replace("%%navitia_id%%", rapp[0][1] )
else:
print ('ko')
parcours['url'] = "bus_route.htm?osm={}".format(osm_route[0])
liste_template = """
<tr>
<td> %%network%%
</td>
<td> %%route_code%%
</td>
<td>
<a href="bus_route.htm?osm=%%relation_id%%">%%relation_name%%</a>
</td>
<td colspan=2>
%%OSM_nb_stops%%
</td>
<tr>
"""
liste_template = liste_template.replace("%%route_code%%", osm_route[1] )
liste_template = liste_template.replace("%%relation_id%%", osm_route[0] )
liste_template = liste_template.replace("%%relation_name%%", osm_route[2] )
liste_template = liste_template.replace("%%network%%", osm_route[5] )
liste_template = liste_template.replace("%%OSM_nb_stops%%", osm_route[4] )
        #persist the autocompletion data
autocomplete['parcours_osm'].append(parcours)
template_table += liste_template
    #persist the index page
now = datetime.datetime.now()
mon_fichier = open("rendu/assets/template_liste.html", "r")
template = mon_fichier.read()
mon_fichier.close()
template = template.replace("%%tableau_des_routes%%", template_table )
template = template.replace("%%date_du_jour%%", now.strftime("%d/%m/%Y %H:%M") )
mon_fichier = open("rendu/index.html", "wb")
mon_fichier.write(template)
mon_fichier.close()
    #persist the autocompletion file
json.dump(autocomplete, open('rendu/osm_parcours.json', "w"), indent=4)
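#Shape of the generated rendu/osm_parcours.json (illustrative values only):
#  {"parcours_osm": [{"value": "1234567", "label": "[network] 12 > Terminus",
#                     "url": "bus_route.htm?osm=1234567&navitia=line:ABC"}]}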
if __name__ == '__main__':
to_html()
|
mit
| -1,326,381,942,221,232,000
| 34.436782
| 125
| 0.504732
| false
| 3.379531
| false
| false
| false
|
spacy-io/spaCy
|
spacy/tests/test_misc.py
|
1
|
11269
|
import pytest
import os
import ctypes
from pathlib import Path
from spacy.about import __version__ as spacy_version
from spacy import util
from spacy import prefer_gpu, require_gpu, require_cpu
from spacy.ml._precomputable_affine import PrecomputableAffine
from spacy.ml._precomputable_affine import _backprop_precomputable_affine_padding
from spacy.util import dot_to_object, SimpleFrozenList
from thinc.api import Config, Optimizer, ConfigValidationError
from spacy.training.batchers import minibatch_by_words
from spacy.lang.en import English
from spacy.lang.nl import Dutch
from spacy.language import DEFAULT_CONFIG_PATH
from spacy.schemas import ConfigSchemaTraining
from thinc.api import get_current_ops, NumpyOps, CupyOps
from .util import get_random_doc
@pytest.fixture
def is_admin():
"""Determine if the tests are run as admin or not."""
try:
admin = os.getuid() == 0
except AttributeError:
admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
return admin
@pytest.mark.parametrize("text", ["hello/world", "hello world"])
def test_util_ensure_path_succeeds(text):
path = util.ensure_path(text)
assert isinstance(path, Path)
@pytest.mark.parametrize(
"package,result", [("numpy", True), ("sfkodskfosdkfpsdpofkspdof", False)]
)
def test_util_is_package(package, result):
"""Test that an installed package via pip is recognised by util.is_package."""
assert util.is_package(package) is result
@pytest.mark.parametrize("package", ["thinc"])
def test_util_get_package_path(package):
"""Test that a Path object is returned for a package name."""
path = util.get_package_path(package)
assert isinstance(path, Path)
def test_PrecomputableAffine(nO=4, nI=5, nF=3, nP=2):
model = PrecomputableAffine(nO=nO, nI=nI, nF=nF, nP=nP).initialize()
assert model.get_param("W").shape == (nF, nO, nP, nI)
tensor = model.ops.alloc((10, nI))
Y, get_dX = model.begin_update(tensor)
assert Y.shape == (tensor.shape[0] + 1, nF, nO, nP)
dY = model.ops.alloc((15, nO, nP))
ids = model.ops.alloc((15, nF))
ids[1, 2] = -1
dY[1] = 1
assert not model.has_grad("pad")
d_pad = _backprop_precomputable_affine_padding(model, dY, ids)
assert d_pad[0, 2, 0, 0] == 1.0
ids.fill(0.0)
dY.fill(0.0)
dY[0] = 0
ids[1, 2] = 0
ids[1, 1] = -1
ids[1, 0] = -1
dY[1] = 1
ids[2, 0] = -1
dY[2] = 5
d_pad = _backprop_precomputable_affine_padding(model, dY, ids)
assert d_pad[0, 0, 0, 0] == 6
assert d_pad[0, 1, 0, 0] == 1
assert d_pad[0, 2, 0, 0] == 0
def test_prefer_gpu():
try:
import cupy # noqa: F401
prefer_gpu()
assert isinstance(get_current_ops(), CupyOps)
except ImportError:
assert not prefer_gpu()
def test_require_gpu():
try:
import cupy # noqa: F401
require_gpu()
assert isinstance(get_current_ops(), CupyOps)
except ImportError:
with pytest.raises(ValueError):
require_gpu()
def test_require_cpu():
require_cpu()
assert isinstance(get_current_ops(), NumpyOps)
try:
import cupy # noqa: F401
require_gpu()
assert isinstance(get_current_ops(), CupyOps)
except ImportError:
pass
require_cpu()
assert isinstance(get_current_ops(), NumpyOps)
def test_ascii_filenames():
"""Test that all filenames in the project are ASCII.
See: https://twitter.com/_inesmontani/status/1177941471632211968
"""
root = Path(__file__).parent.parent
for path in root.glob("**/*"):
assert all(ord(c) < 128 for c in path.name), path.name
def test_load_model_blank_shortcut():
"""Test that using a model name like "blank:en" works as a shortcut for
spacy.blank("en").
"""
nlp = util.load_model("blank:en")
assert nlp.lang == "en"
assert nlp.pipeline == []
with pytest.raises(ImportError):
util.load_model("blank:fjsfijsdof")
@pytest.mark.parametrize(
"version,constraint,compatible",
[
(spacy_version, spacy_version, True),
(spacy_version, f">={spacy_version}", True),
("3.0.0", "2.0.0", False),
("3.2.1", ">=2.0.0", True),
("2.2.10a1", ">=1.0.0,<2.1.1", False),
("3.0.0.dev3", ">=1.2.3,<4.5.6", True),
("n/a", ">=1.2.3,<4.5.6", None),
("1.2.3", "n/a", None),
("n/a", "n/a", None),
],
)
def test_is_compatible_version(version, constraint, compatible):
assert util.is_compatible_version(version, constraint) is compatible
@pytest.mark.parametrize(
"constraint,expected",
[
("3.0.0", False),
("==3.0.0", False),
(">=2.3.0", True),
(">2.0.0", True),
("<=2.0.0", True),
(">2.0.0,<3.0.0", False),
(">=2.0.0,<3.0.0", False),
("!=1.1,>=1.0,~=1.0", True),
("n/a", None),
],
)
def test_is_unconstrained_version(constraint, expected):
assert util.is_unconstrained_version(constraint) is expected
@pytest.mark.parametrize(
"a1,a2,b1,b2,is_match",
[
("3.0.0", "3.0", "3.0.1", "3.0", True),
("3.1.0", "3.1", "3.2.1", "3.2", False),
("xxx", None, "1.2.3.dev0", "1.2", False),
],
)
def test_minor_version(a1, a2, b1, b2, is_match):
assert util.get_minor_version(a1) == a2
assert util.get_minor_version(b1) == b2
assert util.is_minor_version_match(a1, b1) is is_match
assert util.is_minor_version_match(a2, b2) is is_match
@pytest.mark.parametrize(
"dot_notation,expected",
[
(
{"token.pos": True, "token._.xyz": True},
{"token": {"pos": True, "_": {"xyz": True}}},
),
(
{"training.batch_size": 128, "training.optimizer.learn_rate": 0.01},
{"training": {"batch_size": 128, "optimizer": {"learn_rate": 0.01}}},
),
],
)
def test_dot_to_dict(dot_notation, expected):
result = util.dot_to_dict(dot_notation)
assert result == expected
assert util.dict_to_dot(result) == dot_notation
def test_set_dot_to_object():
config = {"foo": {"bar": 1, "baz": {"x": "y"}}, "test": {"a": {"b": "c"}}}
with pytest.raises(KeyError):
util.set_dot_to_object(config, "foo.bar.baz", 100)
with pytest.raises(KeyError):
util.set_dot_to_object(config, "hello.world", 100)
with pytest.raises(KeyError):
util.set_dot_to_object(config, "test.a.b.c", 100)
util.set_dot_to_object(config, "foo.bar", 100)
assert config["foo"]["bar"] == 100
util.set_dot_to_object(config, "foo.baz.x", {"hello": "world"})
assert config["foo"]["baz"]["x"]["hello"] == "world"
assert config["test"]["a"]["b"] == "c"
util.set_dot_to_object(config, "foo", 123)
assert config["foo"] == 123
util.set_dot_to_object(config, "test", "hello")
assert dict(config) == {"foo": 123, "test": "hello"}
@pytest.mark.parametrize(
"doc_sizes, expected_batches",
[
([400, 400, 199], [3]),
([400, 400, 199, 3], [4]),
([400, 400, 199, 3, 200], [3, 2]),
([400, 400, 199, 3, 1], [5]),
([400, 400, 199, 3, 1, 1500], [5]), # 1500 will be discarded
([400, 400, 199, 3, 1, 200], [3, 3]),
([400, 400, 199, 3, 1, 999], [3, 3]),
([400, 400, 199, 3, 1, 999, 999], [3, 2, 1, 1]),
([1, 2, 999], [3]),
([1, 2, 999, 1], [4]),
([1, 200, 999, 1], [2, 2]),
([1, 999, 200, 1], [2, 2]),
],
)
def test_util_minibatch(doc_sizes, expected_batches):
docs = [get_random_doc(doc_size) for doc_size in doc_sizes]
tol = 0.2
batch_size = 1000
batches = list(
minibatch_by_words(docs, size=batch_size, tolerance=tol, discard_oversize=True)
)
assert [len(batch) for batch in batches] == expected_batches
max_size = batch_size + batch_size * tol
for batch in batches:
assert sum([len(doc) for doc in batch]) < max_size
@pytest.mark.parametrize(
"doc_sizes, expected_batches",
[
([400, 4000, 199], [1, 2]),
([400, 400, 199, 3000, 200], [1, 4]),
([400, 400, 199, 3, 1, 1500], [1, 5]),
([400, 400, 199, 3000, 2000, 200, 200], [1, 1, 3, 2]),
([1, 2, 9999], [1, 2]),
([2000, 1, 2000, 1, 1, 1, 2000], [1, 1, 1, 4]),
],
)
def test_util_minibatch_oversize(doc_sizes, expected_batches):
""" Test that oversized documents are returned in their own batch"""
docs = [get_random_doc(doc_size) for doc_size in doc_sizes]
tol = 0.2
batch_size = 1000
batches = list(
minibatch_by_words(docs, size=batch_size, tolerance=tol, discard_oversize=False)
)
assert [len(batch) for batch in batches] == expected_batches
def test_util_dot_section():
cfg_string = """
[nlp]
lang = "en"
pipeline = ["textcat"]
[components]
[components.textcat]
factory = "textcat"
[components.textcat.model]
@architectures = "spacy.TextCatBOW.v1"
exclusive_classes = true
ngram_size = 1
no_output_layer = false
"""
nlp_config = Config().from_str(cfg_string)
en_nlp = util.load_model_from_config(nlp_config, auto_fill=True)
default_config = Config().from_disk(DEFAULT_CONFIG_PATH)
default_config["nlp"]["lang"] = "nl"
nl_nlp = util.load_model_from_config(default_config, auto_fill=True)
# Test that creation went OK
assert isinstance(en_nlp, English)
assert isinstance(nl_nlp, Dutch)
assert nl_nlp.pipe_names == []
assert en_nlp.pipe_names == ["textcat"]
# not exclusive_classes
assert en_nlp.get_pipe("textcat").model.attrs["multi_label"] is False
# Test that default values got overwritten
assert en_nlp.config["nlp"]["pipeline"] == ["textcat"]
assert nl_nlp.config["nlp"]["pipeline"] == [] # default value []
# Test proper functioning of 'dot_to_object'
with pytest.raises(KeyError):
dot_to_object(en_nlp.config, "nlp.pipeline.tagger")
with pytest.raises(KeyError):
dot_to_object(en_nlp.config, "nlp.unknownattribute")
T = util.registry.resolve(nl_nlp.config["training"], schema=ConfigSchemaTraining)
assert isinstance(dot_to_object({"training": T}, "training.optimizer"), Optimizer)
def test_simple_frozen_list():
t = SimpleFrozenList(["foo", "bar"])
assert t == ["foo", "bar"]
assert t.index("bar") == 1 # okay method
with pytest.raises(NotImplementedError):
t.append("baz")
with pytest.raises(NotImplementedError):
t.sort()
with pytest.raises(NotImplementedError):
t.extend(["baz"])
with pytest.raises(NotImplementedError):
t.pop()
t = SimpleFrozenList(["foo", "bar"], error="Error!")
with pytest.raises(NotImplementedError):
t.append("baz")
def test_resolve_dot_names():
config = {
"training": {"optimizer": {"@optimizers": "Adam.v1"}},
"foo": {"bar": "training.optimizer", "baz": "training.xyz"},
}
result = util.resolve_dot_names(config, ["training.optimizer"])
assert isinstance(result[0], Optimizer)
with pytest.raises(ConfigValidationError) as e:
util.resolve_dot_names(config, ["training.xyz", "training.optimizer"])
errors = e.value.errors
assert len(errors) == 1
assert errors[0]["loc"] == ["training", "xyz"]
|
mit
| 5,117,938,492,506,105,000
| 31.289398
| 88
| 0.59961
| false
| 3.07644
| true
| false
| false
|
NicoVarg99/daf-recipes
|
ckan/ckan/ckan/ckan/logic/validators.py
|
1
|
27447
|
# encoding: utf-8
import collections
import datetime
from itertools import count
import re
import mimetypes
import ckan.lib.navl.dictization_functions as df
import ckan.logic as logic
import ckan.lib.helpers as h
from ckan.model import (MAX_TAG_LENGTH, MIN_TAG_LENGTH,
PACKAGE_NAME_MIN_LENGTH, PACKAGE_NAME_MAX_LENGTH,
PACKAGE_VERSION_MAX_LENGTH,
VOCABULARY_NAME_MAX_LENGTH,
VOCABULARY_NAME_MIN_LENGTH)
import ckan.authz as authz
from ckan.common import _
Invalid = df.Invalid
StopOnError = df.StopOnError
Missing = df.Missing
missing = df.missing
def owner_org_validator(key, data, errors, context):
value = data.get(key)
if value is missing or value is None:
if not authz.check_config_permission('create_unowned_dataset'):
raise Invalid(_('An organization must be provided'))
data.pop(key, None)
raise df.StopOnError
model = context['model']
user = context['user']
user = model.User.get(user)
if value == '':
if not authz.check_config_permission('create_unowned_dataset'):
raise Invalid(_('An organization must be provided'))
return
group = model.Group.get(value)
if not group:
raise Invalid(_('Organization does not exist'))
group_id = group.id
if not(user.sysadmin or
authz.has_user_permission_for_group_or_org(
group_id, user.name, 'create_dataset')):
raise Invalid(_('You cannot add a dataset to this organization'))
data[key] = group_id
def package_id_not_changed(value, context):
package = context.get('package')
if package and value != package.id:
raise Invalid('Cannot change value of key from %s to %s. '
'This key is read-only' % (package.id, value))
return value
def int_validator(value, context):
'''
Return an integer for value, which may be a string in base 10 or
a numeric type (e.g. int, long, float, Decimal, Fraction). Return
None for None or empty/all-whitespace string values.
:raises: ckan.lib.navl.dictization_functions.Invalid for other
inputs or non-whole values
'''
if value is None:
return None
if hasattr(value, 'strip') and not value.strip():
return None
try:
whole, part = divmod(value, 1)
except TypeError:
try:
return int(value)
except ValueError:
pass
else:
if not part:
try:
return int(whole)
except TypeError:
pass # complex number: fail like int(complex) does
raise Invalid(_('Invalid integer'))
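# Rough behaviour sketch for int_validator (hypothetical calls; the context argument is unused):
#   int_validator(None, {})  -> None
#   int_validator('  ', {})  -> None
#   int_validator('42', {})  -> 42
#   int_validator(7.0, {})   -> 7
#   int_validator(3.5, {})   -> raises Invalid (not a whole number)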
def natural_number_validator(value, context):
value = int_validator(value, context)
if value < 0:
raise Invalid(_('Must be a natural number'))
return value
def is_positive_integer(value, context):
value = int_validator(value, context)
if value < 1:
        raise Invalid(_('Must be a positive integer'))
return value
def boolean_validator(value, context):
'''
Return a boolean for value.
Return value when value is a python bool type.
Return True for strings 'true', 'yes', 't', 'y', and '1'.
Return False in all other cases, including when value is an empty string or
None
'''
if value is missing or value is None:
return False
if isinstance(value, bool):
return value
if value.lower() in ['true', 'yes', 't', 'y', '1']:
return True
return False
def isodate(value, context):
if isinstance(value, datetime.datetime):
return value
if value == '':
return None
try:
date = h.date_str_to_datetime(value)
except (TypeError, ValueError), e:
raise Invalid(_('Date format incorrect'))
return date
def no_http(value, context):
model = context['model']
session = context['session']
if 'http:' in value:
raise Invalid(_('No links are allowed in the log_message.'))
return value
def package_id_exists(value, context):
model = context['model']
session = context['session']
result = session.query(model.Package).get(value)
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))
return value
def package_id_does_not_exist(value, context):
model = context['model']
session = context['session']
result = session.query(model.Package).get(value)
if result:
raise Invalid(_('Dataset id already exists'))
return value
def package_name_exists(value, context):
model = context['model']
session = context['session']
result = session.query(model.Package).filter_by(name=value).first()
if not result:
raise Invalid(_('Not found') + ': %s' % value)
return value
def package_id_or_name_exists(package_id_or_name, context):
'''Return the given package_id_or_name if such a package exists.
:raises: ckan.lib.navl.dictization_functions.Invalid if there is no
package with the given id or name
'''
model = context['model']
session = context['session']
result = session.query(model.Package).get(package_id_or_name)
if result:
return package_id_or_name
result = session.query(model.Package).filter_by(
name=package_id_or_name).first()
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))
return package_id_or_name
def resource_id_exists(value, context):
model = context['model']
session = context['session']
if not session.query(model.Resource).get(value):
raise Invalid('%s: %s' % (_('Not found'), _('Resource')))
return value
def user_id_exists(user_id, context):
'''Raises Invalid if the given user_id does not exist in the model given
in the context, otherwise returns the given user_id.
'''
model = context['model']
session = context['session']
result = session.query(model.User).get(user_id)
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('User')))
return user_id
def user_id_or_name_exists(user_id_or_name, context):
'''Return the given user_id_or_name if such a user exists.
:raises: ckan.lib.navl.dictization_functions.Invalid if no user can be
found with the given id or user name
'''
model = context['model']
session = context['session']
result = session.query(model.User).get(user_id_or_name)
if result:
return user_id_or_name
result = session.query(model.User).filter_by(name=user_id_or_name).first()
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('User')))
return user_id_or_name
def group_id_exists(group_id, context):
'''Raises Invalid if the given group_id does not exist in the model given
in the context, otherwise returns the given group_id.
'''
model = context['model']
session = context['session']
result = session.query(model.Group).get(group_id)
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('Group')))
return group_id
def group_id_or_name_exists(reference, context):
'''
Raises Invalid if a group identified by the name or id cannot be found.
'''
model = context['model']
result = model.Group.get(reference)
if not result:
raise Invalid(_('That group name or ID does not exist.'))
return reference
def activity_type_exists(activity_type):
'''Raises Invalid if there is no registered activity renderer for the
given activity_type. Otherwise returns the given activity_type.
This just uses object_id_validators as a lookup.
very safe.
'''
if activity_type in object_id_validators:
return activity_type
else:
raise Invalid('%s: %s' % (_('Not found'), _('Activity type')))
# A dictionary mapping activity_type values from activity dicts to functions
# for validating the object_id values from those same activity dicts.
object_id_validators = {
'new package' : package_id_exists,
'changed package' : package_id_exists,
'deleted package' : package_id_exists,
'follow dataset' : package_id_exists,
'new user' : user_id_exists,
'changed user' : user_id_exists,
'follow user' : user_id_exists,
'new group' : group_id_exists,
'changed group' : group_id_exists,
'deleted group' : group_id_exists,
'new organization' : group_id_exists,
'changed organization' : group_id_exists,
'deleted organization' : group_id_exists,
'follow group' : group_id_exists,
}
def object_id_validator(key, activity_dict, errors, context):
'''Validate the 'object_id' value of an activity_dict.
Uses the object_id_validators dict (above) to find and call an 'object_id'
validator function for the given activity_dict's 'activity_type' value.
Raises Invalid if the model given in context contains no object of the
correct type (according to the 'activity_type' value of the activity_dict)
with the given ID.
Raises Invalid if there is no object_id_validator for the activity_dict's
'activity_type' value.
'''
activity_type = activity_dict[('activity_type',)]
if object_id_validators.has_key(activity_type):
object_id = activity_dict[('object_id',)]
return object_id_validators[activity_type](object_id, context)
else:
raise Invalid('There is no object_id validator for '
'activity type "%s"' % activity_type)
name_match = re.compile('[a-z0-9_\-]*$')
def name_validator(value, context):
'''Return the given value if it's a valid name, otherwise raise Invalid.
If it's a valid name, the given value will be returned unmodified.
This function applies general validation rules for names of packages,
groups, users, etc.
Most schemas also have their own custom name validator function to apply
custom validation rules after this function, for example a
``package_name_validator()`` to check that no package with the given name
already exists.
:raises ckan.lib.navl.dictization_functions.Invalid: if ``value`` is not
a valid name
'''
if not isinstance(value, basestring):
raise Invalid(_('Names must be strings'))
# check basic textual rules
if value in ['new', 'edit', 'search']:
raise Invalid(_('That name cannot be used'))
if len(value) < 2:
raise Invalid(_('Must be at least %s characters long') % 2)
if len(value) > PACKAGE_NAME_MAX_LENGTH:
raise Invalid(_('Name must be a maximum of %i characters long') % \
PACKAGE_NAME_MAX_LENGTH)
if not name_match.match(value):
raise Invalid(_('Must be purely lowercase alphanumeric '
'(ascii) characters and these symbols: -_'))
return value
def package_name_validator(key, data, errors, context):
model = context['model']
session = context['session']
package = context.get('package')
query = session.query(model.Package.name).filter_by(name=data[key])
if package:
package_id = package.id
else:
package_id = data.get(key[:-1] + ('id',))
if package_id and package_id is not missing:
query = query.filter(model.Package.id <> package_id)
result = query.first()
if result:
errors[key].append(_('That URL is already in use.'))
value = data[key]
if len(value) < PACKAGE_NAME_MIN_LENGTH:
raise Invalid(
_('Name "%s" length is less than minimum %s') % (value, PACKAGE_NAME_MIN_LENGTH)
)
if len(value) > PACKAGE_NAME_MAX_LENGTH:
raise Invalid(
_('Name "%s" length is more than maximum %s') % (value, PACKAGE_NAME_MAX_LENGTH)
)
def package_version_validator(value, context):
if len(value) > PACKAGE_VERSION_MAX_LENGTH:
raise Invalid(_('Version must be a maximum of %i characters long') % \
PACKAGE_VERSION_MAX_LENGTH)
return value
def duplicate_extras_key(key, data, errors, context):
unflattened = df.unflatten(data)
extras = unflattened.get('extras', [])
extras_keys = []
for extra in extras:
if not extra.get('deleted'):
extras_keys.append(extra['key'])
for extra_key in set(extras_keys):
extras_keys.remove(extra_key)
if extras_keys:
key_ = ('extras_validation',)
assert key_ not in errors
errors[key_] = [_('Duplicate key "%s"') % extras_keys[0]]
def group_name_validator(key, data, errors, context):
model = context['model']
session = context['session']
group = context.get('group')
query = session.query(model.Group.name).filter_by(name=data[key])
if group:
group_id = group.id
else:
group_id = data.get(key[:-1] + ('id',))
if group_id and group_id is not missing:
query = query.filter(model.Group.id <> group_id)
result = query.first()
if result:
errors[key].append(_('Group name already exists in database'))
def tag_length_validator(value, context):
if len(value) < MIN_TAG_LENGTH:
raise Invalid(
_('Tag "%s" length is less than minimum %s') % (value, MIN_TAG_LENGTH)
)
if len(value) > MAX_TAG_LENGTH:
raise Invalid(
_('Tag "%s" length is more than maximum %i') % (value, MAX_TAG_LENGTH)
)
return value
def tag_name_validator(value, context):
tagname_match = re.compile('[\w \-.]*$', re.UNICODE)
if not tagname_match.match(value):
raise Invalid(_('Tag "%s" must be alphanumeric '
'characters or symbols: -_.') % (value))
return value
def tag_not_uppercase(value, context):
tagname_uppercase = re.compile('[A-Z]')
if tagname_uppercase.search(value):
raise Invalid(_('Tag "%s" must not be uppercase' % (value)))
return value
def tag_string_convert(key, data, errors, context):
'''Takes a list of tags that is a comma-separated string (in data[key])
and parses tag names. These are added to the data dict, enumerated. They
are also validated.'''
if isinstance(data[key], basestring):
tags = [tag.strip() \
for tag in data[key].split(',') \
if tag.strip()]
else:
tags = data[key]
current_index = max( [int(k[1]) for k in data.keys() if len(k) == 3 and k[0] == 'tags'] + [-1] )
for num, tag in zip(count(current_index+1), tags):
data[('tags', num, 'name')] = tag
for tag in tags:
tag_length_validator(tag, context)
tag_name_validator(tag, context)
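# Illustrative effect of tag_string_convert on a flattened data dict (hypothetical values):
#   data = {('tags',): 'economy, health'}
#   tag_string_convert(('tags',), data, {}, context)
#   # data now also holds ('tags', 0, 'name') = 'economy' and ('tags', 1, 'name') = 'health'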
def ignore_not_admin(key, data, errors, context):
# Deprecated in favour of ignore_not_package_admin
return ignore_not_package_admin(key, data, errors, context)
def ignore_not_package_admin(key, data, errors, context):
'''Ignore if the user is not allowed to administer the package specified.'''
model = context['model']
user = context.get('user')
if 'ignore_auth' in context:
return
if user and authz.is_sysadmin(user):
return
authorized = False
pkg = context.get('package')
if pkg:
try:
logic.check_access('package_change_state',context)
authorized = True
except logic.NotAuthorized:
authorized = False
if (user and pkg and authorized):
return
# allow_state_change in the context will allow the state to be changed
    # FIXME is this the best way to check for state only?
if key == ('state',) and context.get('allow_state_change'):
return
data.pop(key)
def ignore_not_sysadmin(key, data, errors, context):
'''Ignore the field if user not sysadmin or ignore_auth in context.'''
user = context.get('user')
ignore_auth = context.get('ignore_auth')
if ignore_auth or (user and authz.is_sysadmin(user)):
return
data.pop(key)
def ignore_not_group_admin(key, data, errors, context):
'''Ignore if the user is not allowed to administer for the group specified.'''
model = context['model']
user = context.get('user')
if user and authz.is_sysadmin(user):
return
authorized = False
group = context.get('group')
if group:
try:
logic.check_access('group_change_state',context)
authorized = True
except logic.NotAuthorized:
authorized = False
if (user and group and authorized):
return
data.pop(key)
def user_name_validator(key, data, errors, context):
'''Validate a new user name.
Append an error message to ``errors[key]`` if a user named ``data[key]``
already exists. Otherwise, do nothing.
:raises ckan.lib.navl.dictization_functions.Invalid: if ``data[key]`` is
not a string
:rtype: None
'''
model = context['model']
new_user_name = data[key]
if not isinstance(new_user_name, basestring):
raise Invalid(_('User names must be strings'))
user = model.User.get(new_user_name)
if user is not None:
# A user with new_user_name already exists in the database.
user_obj_from_context = context.get('user_obj')
if user_obj_from_context and user_obj_from_context.id == user.id:
# If there's a user_obj in context with the same id as the user
# found in the db, then we must be doing a user_update and not
# updating the user name, so don't return an error.
return
else:
# Otherwise return an error: there's already another user with that
            # name, so you cannot create a new user with that name or update an
# existing user's name to that name.
errors[key].append(_('That login name is not available.'))
def user_both_passwords_entered(key, data, errors, context):
password1 = data.get(('password1',),None)
password2 = data.get(('password2',),None)
if password1 is None or password1 == '' or \
password2 is None or password2 == '':
errors[('password',)].append(_('Please enter both passwords'))
def user_password_validator(key, data, errors, context):
value = data[key]
if isinstance(value, Missing):
pass
elif not isinstance(value, basestring):
errors[('password',)].append(_('Passwords must be strings'))
elif value == '':
pass
elif len(value) < 4:
errors[('password',)].append(_('Your password must be 4 characters or longer'))
def user_passwords_match(key, data, errors, context):
password1 = data.get(('password1',),None)
password2 = data.get(('password2',),None)
if not password1 == password2:
errors[key].append(_('The passwords you entered do not match'))
else:
#Set correct password
data[('password',)] = password1
def user_password_not_empty(key, data, errors, context):
'''Only check if password is present if the user is created via action API.
If not, user_both_passwords_entered will handle the validation'''
# sysadmin may provide password_hash directly for importing users
if (data.get(('password_hash',), missing) is not missing and
authz.is_sysadmin(context.get('user'))):
return
if not ('password1',) in data and not ('password2',) in data:
password = data.get(('password',),None)
if not password:
errors[key].append(_('Missing value'))
def user_about_validator(value,context):
if 'http://' in value or 'https://' in value:
raise Invalid(_('Edit not allowed as it looks like spam. Please avoid links in your description.'))
return value
def vocabulary_name_validator(name, context):
model = context['model']
session = context['session']
if len(name) < VOCABULARY_NAME_MIN_LENGTH:
raise Invalid(_('Name must be at least %s characters long') %
VOCABULARY_NAME_MIN_LENGTH)
if len(name) > VOCABULARY_NAME_MAX_LENGTH:
raise Invalid(_('Name must be a maximum of %i characters long') %
VOCABULARY_NAME_MAX_LENGTH)
query = session.query(model.Vocabulary.name).filter_by(name=name)
result = query.first()
if result:
raise Invalid(_('That vocabulary name is already in use.'))
return name
def vocabulary_id_not_changed(value, context):
vocabulary = context.get('vocabulary')
if vocabulary and value != vocabulary.id:
raise Invalid(_('Cannot change value of key from %s to %s. '
'This key is read-only') % (vocabulary.id, value))
return value
def vocabulary_id_exists(value, context):
model = context['model']
session = context['session']
result = session.query(model.Vocabulary).get(value)
if not result:
raise Invalid(_('Tag vocabulary was not found.'))
return value
def tag_in_vocabulary_validator(value, context):
model = context['model']
session = context['session']
vocabulary = context.get('vocabulary')
if vocabulary:
query = session.query(model.Tag)\
.filter(model.Tag.vocabulary_id==vocabulary.id)\
.filter(model.Tag.name==value)\
.count()
if not query:
raise Invalid(_('Tag %s does not belong to vocabulary %s') % (value, vocabulary.name))
return value
def tag_not_in_vocabulary(key, tag_dict, errors, context):
tag_name = tag_dict[('name',)]
if not tag_name:
raise Invalid(_('No tag name'))
if tag_dict.has_key(('vocabulary_id',)):
vocabulary_id = tag_dict[('vocabulary_id',)]
else:
vocabulary_id = None
model = context['model']
session = context['session']
query = session.query(model.Tag)
query = query.filter(model.Tag.vocabulary_id==vocabulary_id)
query = query.filter(model.Tag.name==tag_name)
count = query.count()
if count > 0:
raise Invalid(_('Tag %s already belongs to vocabulary %s') %
(tag_name, vocabulary_id))
else:
return
def url_validator(key, data, errors, context):
''' Checks that the provided value (if it is present) is a valid URL '''
import urlparse
import string
model = context['model']
session = context['session']
url = data.get(key, None)
if not url:
return
pieces = urlparse.urlparse(url)
if all([pieces.scheme, pieces.netloc]) and \
set(pieces.netloc) <= set(string.letters + string.digits + '-.') and \
pieces.scheme in ['http', 'https']:
return
errors[key].append(_('Please provide a valid URL'))
def user_name_exists(user_name, context):
model = context['model']
session = context['session']
result = session.query(model.User).filter_by(name=user_name).first()
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('User')))
return result.name
def role_exists(role, context):
if role not in authz.ROLE_PERMISSIONS:
raise Invalid(_('role does not exist.'))
return role
def datasets_with_no_organization_cannot_be_private(key, data, errors,
context):
dataset_id = data.get(('id',))
owner_org = data.get(('owner_org',))
private = data[key] is True
check_passed = True
if not dataset_id and private and not owner_org:
# When creating a dataset, enforce it directly
check_passed = False
elif dataset_id and private and not owner_org:
# Check if the dataset actually has an owner_org, even if not provided
try:
dataset_dict = logic.get_action('package_show')({},
{'id': dataset_id})
if not dataset_dict.get('owner_org'):
check_passed = False
except logic.NotFound:
check_passed = False
if not check_passed:
errors[key].append(
_("Datasets with no organization can't be private."))
def list_of_strings(key, data, errors, context):
value = data.get(key)
if not isinstance(value, list):
raise Invalid(_('Not a list'))
for x in value:
if not isinstance(x, basestring):
raise Invalid('%s: %s' % (_('Not a string'), x))
def if_empty_guess_format(key, data, errors, context):
value = data[key]
resource_id = data.get(key[:-1] + ('id',))
# if resource_id then an update
if (not value or value is Missing) and not resource_id:
url = data.get(key[:-1] + ('url',), '')
mimetype, encoding = mimetypes.guess_type(url)
if mimetype:
data[key] = mimetype
def clean_format(format):
return h.unified_resource_format(format)
def no_loops_in_hierarchy(key, data, errors, context):
'''Checks that the parent groups specified in the data would not cause
a loop in the group hierarchy, and therefore cause the recursion up/down
the hierarchy to get into an infinite loop.
'''
if not 'id' in data:
# Must be a new group - has no children, so no chance of loops
return
group = context['model'].Group.get(data['id'])
allowable_parents = group.\
groups_allowed_to_be_its_parent(type=group.type)
for parent in data['groups']:
parent_name = parent['name']
# a blank name signifies top level, which is always allowed
if parent_name and context['model'].Group.get(parent_name) \
not in allowable_parents:
raise Invalid(_('This parent would create a loop in the '
'hierarchy'))
def filter_fields_and_values_should_have_same_length(key, data, errors, context):
convert_to_list_if_string = logic.converters.convert_to_list_if_string
fields = convert_to_list_if_string(data.get(('filter_fields',), []))
values = convert_to_list_if_string(data.get(('filter_values',), []))
if len(fields) != len(values):
msg = _('"filter_fields" and "filter_values" should have the same length')
errors[('filter_fields',)].append(msg)
errors[('filter_values',)].append(msg)
def filter_fields_and_values_exist_and_are_valid(key, data, errors, context):
convert_to_list_if_string = logic.converters.convert_to_list_if_string
fields = convert_to_list_if_string(data.get(('filter_fields',)))
values = convert_to_list_if_string(data.get(('filter_values',)))
if not fields:
errors[('filter_fields',)].append(_('"filter_fields" is required when '
'"filter_values" is filled'))
if not values:
errors[('filter_values',)].append(_('"filter_values" is required when '
'"filter_fields" is filled'))
filters = collections.defaultdict(list)
for field, value in zip(fields, values):
filters[field].append(value)
data[('filters',)] = dict(filters)
def extra_key_not_in_root_schema(key, data, errors, context):
for schema_key in context.get('schema_keys', []):
if schema_key == data[key]:
raise Invalid(_('There is a schema field with the same name'))
def empty_if_not_sysadmin(key, data, errors, context):
'''Only sysadmins may pass this value'''
from ckan.lib.navl.validators import empty
user = context.get('user')
ignore_auth = context.get('ignore_auth')
if ignore_auth or (user and authz.is_sysadmin(user)):
return
empty(key, data, errors, context)
|
gpl-3.0
| 4,499,609,140,008,467,000
| 31.989183
| 107
| 0.626407
| false
| 3.886576
| false
| false
| false
|
narunask/silly_chatbot
|
rtmbot/app/plugins/chatbot.py
|
1
|
1552
|
#!/usr/bin/env python3
# coding: utf-8
from rtmbot.core import Plugin
from chatterbot import ChatBot
from plugins.console import Command
# Sessions
SESS = {}
# Init ChatBots
BOTS = ['HAL 9000', 'Wall-E', 'Agent Smith']
TRAINER='chatterbot.trainers.ChatterBotCorpusTrainer'
BOT_DICT = {B: ChatBot(B, trainer=TRAINER) for B in BOTS}
# Train based on the english corpus
#for B in BOT_DICT.values():
# B.train("chatterbot.corpus.english")
class Reply(Plugin):
def process_message(self, data):
print(data)
channel = data['channel']
if not channel.startswith("D"):
return
user = data['user']
team = data['team']
# User ID
uid = '_'.join([user,team])
bot = SESS.get(uid, None)
cmd = Command(bot=bot, bot_dict=BOT_DICT)
question = data['text'].strip()
if bot:
print(bot.name, 'is processing question:', question)
else:
print('Processing question:', question)
bot_response = cmd.run(q=question)
if cmd.error:
self.outputs.append([channel, '<BOT> {answer}'.format(answer=cmd.error)])
elif cmd.bot:
bot = cmd.bot
SESS[uid] = bot
answ_dict = dict(bot=bot.name, answer=bot_response)
self.outputs.append([channel, '<BOT@{bot}> {answer}'.format(**answ_dict)])
elif not cmd.bot:
if uid in SESS:
del SESS[uid]
self.outputs.append([channel, '<BOT> {answer}'.format(answer=bot_response)])
|
mit
| 2,409,432,908,618,128,000
| 27.740741
| 88
| 0.586985
| false
| 3.448889
| false
| false
| false
|
jds2001/ocp-checkbox
|
plugins/jobs_info.py
|
1
|
13008
|
#
# This file is part of Checkbox.
#
# Copyright 2010 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import sys
import difflib
import gettext
import logging
from collections import defaultdict
from gettext import gettext as _
from checkbox.lib.resolver import Resolver
from checkbox.arguments import coerce_arguments
from checkbox.plugin import Plugin
from checkbox.properties import (
Float,
Int,
List,
Map,
Path,
String,
)
job_schema = Map({
"plugin": String(),
"name": String(),
"type": String(required=False),
"status": String(required=False),
"suite": String(required=False),
"description": String(required=False),
"purpose": String(required=False),
"steps": String(required=False),
"info": String(required=False),
"verification": String(required=False),
"command": String(required=False),
"depends": List(String(), required=False),
"duration": Float(required=False),
"environ": List(String(), required=False),
"requires": List(String(), separator=r"\n", required=False),
"resources": List(String(), required=False),
"estimated_duration": Float(required=False),
"timeout": Int(required=False),
"user": String(required=False),
"data": String(required=False)})
class JobsInfo(Plugin):
# Domain for internationalization
domain = String(default="checkbox")
# Space separated list of directories where job files are stored.
directories = List(Path(),
default_factory=lambda: "%(checkbox_share)s/jobs")
# List of jobs to blacklist
blacklist = List(String(), default_factory=lambda: "")
# Path to blacklist file
blacklist_file = Path(required=False)
# List of jobs to whitelist
whitelist = List(String(), default_factory=lambda: "")
# Path to whitelist file
whitelist_file = Path(required=False)
def register(self, manager):
super(JobsInfo, self).register(manager)
self.whitelist_patterns = self.get_patterns(
self.whitelist, self.whitelist_file)
self.blacklist_patterns = self.get_patterns(
self.blacklist, self.blacklist_file)
self.selected_jobs = defaultdict(list)
self._missing_dependencies_report = ""
self._manager.reactor.call_on("prompt-begin", self.prompt_begin)
self._manager.reactor.call_on("gather", self.gather)
if logging.getLogger().getEffectiveLevel() <= logging.DEBUG:
self._manager.reactor.call_on(
"prompt-gather", self.post_gather, 90)
self._manager.reactor.call_on("report-job", self.report_job, -100)
def prompt_begin(self, interface):
"""
Capture interface object to use it later
to display errors
"""
self.interface = interface
self.unused_patterns = (
self.whitelist_patterns + self.blacklist_patterns)
def check_ordered_messages(self, messages):
"""Return whether the list of messages are ordered or not.
Also populates a _missing_dependencies_report string variable
with a report of any jobs that are required but not present
in the whitelist."""
names_so_far = set()
all_names = set([message['name'] for message in messages])
messages_ordered = True
missing_dependencies = defaultdict(set)
for message in messages:
name = message["name"]
for dependency in message.get("depends", []):
if dependency not in names_so_far:
messages_ordered = False
#Two separate checks :) we *could* save a negligible
#bit of time by putting this inside the previous "if"
#but we're not in *that* big a hurry.
if dependency not in all_names:
missing_dependencies[name].add(dependency)
names_so_far.add(name)
#Now assemble the list of missing deps into a nice report
jobs_and_missing_deps = ["{} required by {}".format(job_name,
", ".join(missing_dependencies[job_name]))
for job_name in missing_dependencies]
self._missing_dependencies_report = "\n".join(jobs_and_missing_deps)
return messages_ordered
def get_patterns(self, strings, filename=None):
"""Return the list of strings as compiled regular expressions."""
if filename:
try:
file = open(filename)
except IOError as e:
error_message = (_("Failed to open file '%s': %s")
% (filename, e.strerror))
logging.critical(error_message)
sys.stderr.write("%s\n" % error_message)
sys.exit(os.EX_NOINPUT)
else:
strings.extend([l.strip() for l in file.readlines()])
return [re.compile(r"^%s$" % s) for s in strings
if s and not s.startswith("#")]
def get_unique_messages(self, messages):
"""Return the list of messages without any duplicates, giving
precedence to messages that are the longest.
"""
unique_messages = []
unique_indexes = {}
for message in messages:
name = message["name"]
index = unique_indexes.get(name)
if index is None:
unique_indexes[name] = len(unique_messages)
unique_messages.append(message)
elif len(message) > len(unique_messages[index]):
unique_messages[index] = message
return unique_messages
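    # For example (hypothetical messages): given two entries named "disk/read",
    # one holding only {"name", "plugin"} and one that also carries "command" and
    # "description", the longer one is kept, at the position where the name first
    # appeared in the list.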
def gather(self):
# Register temporary handler for report-message events
messages = []
def report_message(message):
if self.whitelist_patterns:
name = message["name"]
names = [name for p in self.whitelist_patterns
if p.match(name)]
if not names:
return
messages.append(message)
# Set domain and message event handler
old_domain = gettext.textdomain()
gettext.textdomain(self.domain)
event_id = self._manager.reactor.call_on(
"report-message", report_message, 100)
for directory in self.directories:
self._manager.reactor.fire("message-directory", directory)
for message in messages:
self._manager.reactor.fire("report-job", message)
# Unset domain and event handler
self._manager.reactor.cancel_call(event_id)
gettext.textdomain(old_domain)
# Get unique messages from the now complete list
messages = self.get_unique_messages(messages)
# Apply whitelist ordering
if self.whitelist_patterns:
def key_function(obj):
name = obj["name"]
for pattern in self.whitelist_patterns:
if pattern.match(name):
return self.whitelist_patterns.index(pattern)
messages = sorted(messages, key=key_function)
if not self.check_ordered_messages(messages):
#One of two things may have happened if we enter this code path.
#Either the jobs are not in topological ordering,
#Or they are in topological ordering but a dependency is
#missing.
old_message_names = [
message["name"] + "\n" for message in messages]
resolver = Resolver(key_func=lambda m: m["name"])
for message in messages:
resolver.add(
message, *message.get("depends", []))
messages = resolver.get_dependents()
if (self.whitelist_patterns and
logging.getLogger().getEffectiveLevel() <= logging.DEBUG):
new_message_names = [
message["name"] + "\n" for message in messages]
#This will contain a report of out-of-order jobs.
detailed_text = "".join(
difflib.unified_diff(
old_message_names,
new_message_names,
"old whitelist",
"new whitelist"))
#First, we report missing dependencies, if any.
if self._missing_dependencies_report:
primary = _("Dependencies are missing so some jobs "
"will not run.")
secondary = _("To fix this, close checkbox and add "
"the missing dependencies to the "
"whitelist.")
self._manager.reactor.fire("prompt-warning",
self.interface,
primary,
secondary,
self._missing_dependencies_report)
#If detailed_text is empty, it means the problem
#was missing dependencies, which we already reported.
#Otherwise, we also need to report reordered jobs here.
if detailed_text:
primary = _("Whitelist not topologically ordered")
secondary = _("Jobs will be reordered to fix broken "
"dependencies")
self._manager.reactor.fire("prompt-warning",
self.interface,
primary,
secondary,
detailed_text)
self._manager.reactor.fire("report-jobs", messages)
def post_gather(self, interface):
"""
Verify that all patterns were used
"""
if logging.getLogger().getEffectiveLevel() > logging.DEBUG:
return
orphan_test_cases = []
for name, jobs in self.selected_jobs.items():
is_test = any(job.get('type') == 'test' for job in jobs)
has_suite = any(job.get('suite') for job in jobs)
if is_test and not has_suite:
orphan_test_cases.append(name)
if orphan_test_cases:
detailed_error = \
('Test cases not included in any test suite:\n'
'{0}\n\n'
'This might cause problems '
'when uploading test cases results.\n'
'Please make sure that the patterns you used are up-to-date\n'
.format('\n'.join(['- {0}'.format(tc)
for tc in orphan_test_cases])))
self._manager.reactor.fire('prompt-warning', self.interface,
'Orphan test cases detected',
"Some test cases aren't included "
'in any test suite',
detailed_error)
if self.unused_patterns:
detailed_error = \
('Unused patterns:\n'
'{0}\n\n'
"Please make sure that the patterns you used are up-to-date\n"
.format('\n'.join(['- {0}'.format(p.pattern[1:-1])
for p in self.unused_patterns])))
self._manager.reactor.fire('prompt-warning', self.interface,
'Unused patterns',
'Please make sure that the patterns '
'you used are up-to-date',
detailed_error)
@coerce_arguments(job=job_schema)
def report_job(self, job):
name = job["name"]
patterns = self.whitelist_patterns or self.blacklist_patterns
if patterns:
match = next((p for p in patterns if p.match(name)), None)
if match:
# Keep track of which patterns didn't match any job
if match in self.unused_patterns:
self.unused_patterns.remove(match)
self.selected_jobs[name].append(job)
else:
# Stop if job not in whitelist or in blacklist
self._manager.reactor.stop()
factory = JobsInfo
|
gpl-3.0
| 8,009,267,436,187,903,000
| 38.180723
| 79
| 0.555889
| false
| 4.777084
| true
| false
| false
|
oniwan/GCI
|
sele.py
|
1
|
1328
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import time
driver = webdriver.PhantomJS()
url = "https://lzone.daiwa.co.jp/lzone/common/authorize"
username='shinichiro.ueno@gci.jp'
password = 'gcigci'
driver.get(url)
driver.save_screenshot('search_result2.png')
print driver.current_url
driver.find_element_by_css_selector('input[name="memberId"]').send_keys(username)
driver.find_element_by_css_selector('input[name="passWord"]').send_keys(password)
driver.save_screenshot('search_result3.png')
#driver.find_element_by_class_name('button-login').send_keys(Keys.ENTER)
driver.find_element_by_id('image-btn_ok').send_keys(Keys.ENTER)
print driver.current_url
driver.save_screenshot('search_result410.png')
'''
wait = WebDriverWait(driver, 10)
driver.get(url)
print driver.current_url
driver.save_screenshot('search_result1.png')
driver.find_element_by_css_selector('input[name="account"]').send_keys(username)
driver.find_element_by_css_selector('input[name="password"]').send_keys(password)
driver.find_element_by_class_name('button-login').send_keys(Keys.ENTER)
time.sleep(10)
print driver.current_url
driver.save_screenshot('search_result2.png')
print "end"
'''
|
mit
| 2,879,929,457,836,766,700
| 29.883721
| 81
| 0.779367
| false
| 3.081206
| false
| true
| false
|